"""Run the mni to bold transformation on an fmriprep output"""
import os
def get_parser():
"""Build parser object."""
    from argparse import ArgumentParser, RawTextHelpFormatter
parser = ArgumentParser(
description="""NiWorkflows Utilities""", formatter_class=RawTextHelpFormatter
)
parser.add_argument("fmriprep_dir", action="store", help="fmriprep directory to pull scans from")
parser.add_argument("out_path", action="store", help="the output directory")
parser.add_argument('mni_image', action="store", help="mni template image to use")
parser.add_argument('dseg_path', action="store", help="segmentation to use")
parser.add_argument(
'--n_dummy',
action="store",
help="number of dummy scans",
type=int,
default=4
)
parser.add_argument(
"--omp-nthreads",
action="store",
type=int,
default=os.cpu_count(),
help="Number of CPUs available to individual processes",
)
parser.add_argument(
"--mem-gb",
action="store",
type=int,
default=1,
help="Gigs of ram avialable"
)
return parser
# Get the grand mean std
def roi_grand_std(in_file, dseg_file, out_file=None):
from nilearn import image as nli
from nilearn._utils.niimg import _safe_get_data
from nilearn._utils import check_niimg_4d
import pandas as pd
import os
n_dummy=4
if out_file is None:
out_file = os.getcwd() + '/grand_std.csv'
atlaslabels = nli.load_img(dseg_file).get_fdata()
img_nii = check_niimg_4d(in_file, dtype="auto",)
func_data = nli.load_img(img_nii).get_fdata()[:,:,:,n_dummy:]
ntsteps = func_data.shape[-1]
data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
oseg = atlaslabels[atlaslabels > 0].reshape(-1)
    df = pd.DataFrame(data)
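    # The source snippet ends at the DataFrame construction above. What follows is
    # a hedged sketch (an assumption, not the original implementation) of the
    # remaining steps: average the voxel time series within each segmentation
    # label, take the standard deviation of each ROI's mean time course, and
    # write the result to out_file.
    df["roi"] = oseg
    grand_std = df.groupby("roi").mean().std(axis=1)
    grand_std.rename("grand_std").to_csv(out_file)
    return out_file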
from os import path
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import data
def lng_lat_to_x_y(lng, lat, origin_lng, origin_lat) -> tuple:
EARTH_RADIUS = 6371.000
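    # Equirectangular approximation (small-offset assumption): longitude/latitude
    # offsets from the origin are mapped to planar x/y distances in kilometres.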
x = np.deg2rad(lng - origin_lng) * EARTH_RADIUS * np.cos(np.deg2rad(lat))
y = np.deg2rad(lat - origin_lat) * EARTH_RADIUS
return x, y
def calculate_characteristics(housing: pd.DataFrame, poi: pd.DataFrame) -> tuple:
categories = poi["type"].unique()
housing[categories] = 0
for i, row in poi.iterrows():
if i % 1000 == 0:
print(f"progress: {int(i * 100 / len(poi))}%")
dist = np.sqrt((housing["x"] - row["x"]) ** 2 + (housing["y"] - row["y"]) ** 2)
score = np.exp(-(dist / row["score"]))
score[np.logical_not(np.isfinite(score))] = 0
housing[row["type"]] += score
return housing, poi, categories
def prepare(housing: pd.DataFrame, poi: pd.DataFrame, city: str) -> tuple:
print("transforming coordinates ...")
origin_lng = housing["lng"].mean()
origin_lat = housing["lat"].mean()
housing["x"], housing["y"] = lng_lat_to_x_y(
housing["lng"], housing["lat"], origin_lng, origin_lat
)
poi["x"], poi["y"] = lng_lat_to_x_y(poi["lng"], poi["lat"], origin_lng, origin_lat)
# del housing["lng"], housing["lat"], poi["lng"], poi["lat"]
print("calculating characteristics ...")
current_dir = path.dirname(path.abspath(__file__))
target_filename = path.join(current_dir, f"{city}_prepared_for_fit.csv")
if path.exists(target_filename):
housing = pd.read_csv(target_filename)
categories = poi["type"].unique()
else:
housing, poi, categories = calculate_characteristics(housing, poi)
# for category in categories:
# housing[category] = (
# housing[category] - housing[category].mean()
# ) / housing[category].std()
        # write without the index so the cached CSV reloads cleanly
        housing.to_csv(target_filename, index=False)
print("data prepared for fit:")
print(f" number of samples: {len(housing)}")
print(f" number of POIs: {len(poi)}")
print(f" number of characteristics: {len(categories)}")
return housing, poi, categories
def main(city: str):
housing, poi = data.import_data(city)
housing, poi, categories = prepare(housing, poi, city)
# housing = housing.sample(frac=1, ignore_index=True)
housing.sort_values(by="price", ascending=True, inplace=True, ignore_index=True)
plt.figure()
plt.gca().set_aspect(1)
plt.scatter(x=housing["x"], y=housing["y"], s=1, c=housing["price"])
colorbar = plt.colorbar()
colorbar.set_label("Price")
plt.savefig(f"{city}_housing.png")
# poi = poi.sample(frac=1, ignore_index=True)
poi.sort_values(by="score", ascending=True, inplace=True, ignore_index=True)
plt.figure()
plt.gca().set_aspect(1)
plt.scatter(x=poi["x"], y=poi["y"], s=1, c=poi["score"])
colorbar = plt.colorbar()
colorbar.set_label("Impact Scope")
plt.savefig(f"{city}_poi.png")
print("fitting data ...")
model = smf.ols(f"price ~ {' + '.join(categories)}", data=housing)
results = model.fit()
    with open(f"{city}_ols.txt", mode="w") as ols_file:
        print(results.summary(), file=ols_file)
print("preparing data for prediction ...")
n = 1000
x_fit = np.linspace(housing["x"].min(), housing["x"].max(), n)
y_fit = np.linspace(housing["y"].min(), housing["y"].max(), n)
x_fit, y_fit = np.meshgrid(x_fit, y_fit)
    data_fit = pd.DataFrame()
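    # The source snippet ends at the empty DataFrame above; the rest of this
    # function is a hedged sketch (assumption): score each grid point against the
    # POIs the same way calculate_characteristics does, predict prices with the
    # fitted OLS model, and save a prediction map.
    data_fit["x"] = x_fit.ravel()
    data_fit["y"] = y_fit.ravel()
    for category in categories:
        data_fit[category] = 0.0
    for _, row in poi.iterrows():
        dist = np.sqrt((data_fit["x"] - row["x"]) ** 2 + (data_fit["y"] - row["y"]) ** 2)
        score = np.exp(-(dist / row["score"]))
        score[np.logical_not(np.isfinite(score))] = 0
        data_fit[row["type"]] += score
    price_fit = np.asarray(results.predict(data_fit)).reshape(x_fit.shape)
    plt.figure()
    plt.gca().set_aspect(1)
    plt.pcolormesh(x_fit, y_fit, price_fit)
    plt.colorbar().set_label("Predicted Price")
    plt.savefig(f"{city}_prediction.png")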
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
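    # Hedged sketch (not part of the original file): concrete subclasses are
    # expected to override the two stubs above and pin the parser engine, e.g.
    #
    #     class CParserTests(ParserTests, tm.TestCase):
    #         def read_csv(self, *args, **kwds):
    #             kwds = kwds.copy()
    #             kwds['engine'] = 'c'
    #             return read_csv(*args, **kwds)
    #
    #         def read_table(self, *args, **kwds):
    #             kwds = kwds.copy()
    #             kwds['engine'] = 'c'
    #             return read_table(*args, **kwds)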
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
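    # Hedged usage sketch (assumption): round-trip a frame through CSV and read it
    # back in parallel chunks, e.g.
    #
    #     df = self.construct_dataframe(num_rows=10000)
    #     with tm.ensure_clean('__thread_test__.csv') as path:
    #         df.to_csv(path)
    #         result = self.generate_multithread_dataframe(path, 10000, num_tasks=4)
    #         tm.assert_frame_equal(df, result)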
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
        # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
    def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv( | StringIO(self.data1) | pandas.compat.StringIO |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#########################################################################################
# Name: <NAME>
# Student ID: 64180008
# Department: Computer Engineering
# Assignment ID: A3
#########################################################################################
# In[2]:
import pandas as pd
import random
import numpy as np
# In[3]:
#########################################################################################
# QUESTION I
# Description: The parts of this question are solved using the pd.Series and pd.DataFrame functions.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION I:Perform the following tasks with pandas Series")
# In[4]:
print("1.a")
a = | pd.Series([7,11,13,17]) | pandas.Series |
import os
import pandas as pd
base_dir = "extracted_data"
files_list_dir = base_dir + "/files.xlsx"
files_dir = base_dir + "/files"
txt_files_dir = files_dir + "/txt"
pdf_files_dir = files_dir + "/pdf"
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
if not os.path.isdir(files_dir):
os.mkdir(files_dir)
if not os.path.isdir(txt_files_dir):
os.mkdir(txt_files_dir)
if not os.path.isdir(pdf_files_dir):
os.mkdir(pdf_files_dir)
if not os.path.isfile(files_list_dir):
df = | pd.DataFrame({'type': [], 'year': [], 'number': [], 'title': [], 'note': []}) | pandas.DataFrame |
#!/home/ubuntu/anaconda3/bin//python
'''
MIT License
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
The code is inspired by https://github.com/erikor/medline project, but the logic to
parse medline XML was substantially modified.
'''
# pre-requisites: pip install elasticsearch
# pip install --upgrade pip
# to execute this code:
# STEP 0: ensure elastic search and kibana are running on port 9200
# and 5601 correspondingly
# STEP 1: make sure you have all the medline XML files downloaded from
# STEP 2: then you run nohup ls *.xml | xargs -n 1 -P 4 python ./parseMedline.py &
# the above step assume quad-core processor, and runs it as daemon process so when
# you exit SSH session, it runs in background.
# this should load the data into elastic search
import pandas as pd
import glob
import sys, os
descr_filenames = glob.glob("." + "/descr*.txt")
speech_filenames = glob.glob("." + "/speech*.txt")
speakermap_filenames = glob.glob("." + "/*SpeakerMap.txt")
NO_PARTY_SENTENCE = "N"
REPUBLICAN_SENTENCE = "R"
DEMOCRAT_SENTENCE = "D"
BOTH_PARTY_SENTENCE = "B"
republican = ["rnc", "gop", "republican", "republicans", "conservative", "conservatives", "right wing", "alt right", "far right"]
democrat = ["dnc", "democrat", "democrats", "democratic", "liberal", "liberals", "progressive", "progressives", "moderates", "nonconservative", "nonconservatives", "alt left", "far left", "left wing"]
from datetime import datetime
import json
import logging
from collections import deque
from pathlib import Path
import os.path
logging.basicConfig(filename='parse.log',level=logging.INFO)
DESTINATION_FILE = "congress_party_affiliation_sentences.csv"
import spacy
import textacy
nlp = spacy.load('en_core_web_sm')
import nltk
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
def partyTypeSentence(sent):
global NO_PARTY_SENTENCE, REPUBLICAN_SENTENCE, DEMOCRAT_SENTENCE, BOTH_PARTY_SENTENCE
global republican, democrat
from sklearn.feature_extraction.text import CountVectorizer
# extract unigrams and bigrams
vectorizer = CountVectorizer(ngram_range=(1,2))
analyzer = vectorizer.build_analyzer()
sent_analyzer = analyzer(sent)
if any(word in sent_analyzer for word in republican) and any(word in sent_analyzer for word in democrat):
return BOTH_PARTY_SENTENCE
elif any(word in sent_analyzer for word in republican):
return REPUBLICAN_SENTENCE
elif any(word in sent_analyzer for word in democrat):
return DEMOCRAT_SENTENCE
return NO_PARTY_SENTENCE
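# Hedged usage sketch (not part of the original script; the sample sentences below are
# illustrative assumptions) showing the expected sentence-level party labels.
assert partyTypeSentence("the republican senator spoke") == REPUBLICAN_SENTENCE
assert partyTypeSentence("democrats and republicans met in the chamber") == BOTH_PARTY_SENTENCE
assert partyTypeSentence("the weather was fine") == NO_PARTY_SENTENCE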
for speakermap_filename in speakermap_filenames:
try:
prefix = speakermap_filename[2:5]
print("prefix=", prefix)
descr_filename = "./descr_" + str(prefix) + ".txt"
speech_filename = "./speeches_" + str(prefix) + ".txt"
list_descr = []
list_speech = []
list_speakermap = []
list_descr.append(pd.read_csv(descr_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
list_speech.append(pd.read_csv(speech_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
list_speakermap.append(pd.read_csv(speakermap_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
df_descr = pd.concat(list_descr)
df_speech = pd.concat(list_speech)
df_speakermap = pd.concat(list_speakermap)
print("len df_descr=", len(df_descr))
print("len df_speech=", len(df_speech))
print("len df_speakerma=", len(df_speakermap))
list_descr = None
list_speech = None
list_speakermap = None
df_descr_speech_speakermap = pd.merge(pd.merge(df_descr, df_speech, on='speech_id'), df_speakermap, on='speech_id')
df_descr = None
df_speech = None
df_speakermap = None
        # fill missing speech text and party values with empty strings
df_descr_speech_speakermap['speech'] = df_descr_speech_speakermap['speech'].fillna('')
df_descr_speech_speakermap['party'] = df_descr_speech_speakermap['party'].fillna('')
df_congressPartySentences = | pd.DataFrame(columns=('congress', 'speech_id', 'speaker_party', 'spoken_party', 'sentence')) | pandas.DataFrame |
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import train_test_split
################Stage-1: Sentence Level Classification
df_train = pd.read_csv('ngramsTrain.csv',header=None)
df_test = pd.read_csv('ngramsTest.csv',header=None)
#Encoding 9 classes for classification
mapping = {"bn_en_":0,"en_":1,"gu_en_":2,"hi_en_":3,"kn_en_":4,"ml_en_":5,"mr_en_":6,"ta_en_":7,"te_en_":8}
classes = ["bn_en_","en_","gu_en_","hi_en_","kn_en_","ml_en_","mr_en_","ta_en_","te_en_"]
languages = ["bengali","english","gujarati","hindi","kannada","malayalam","marathi","tamil","telugu"]
df_train = df_train.replace(mapping)
df_test = df_test.replace(mapping)
y_train = df_train[0]
x_train = df_train[1]
y_test = df_test[0]
x_test = df_test[1]
cv = CountVectorizer()
cv.fit(x_train)
new_x = cv.transform(x_train)
train_dataset = new_x.toarray()
######Naive Bayes Classifier
nb = MultinomialNB()
nb.fit(train_dataset,y_train)
######MaxEntropy i.e., Multi Class Logistic Regression
lg = LogisticRegression(random_state=0)
lg.fit(train_dataset,y_train)
new_x_test = cv.transform(x_test)
y_pred = nb.predict(new_x_test)
y1_pred = lg.predict(new_x_test)
print("F1 Score of Naive bayes for sentence classifier is ",metrics.accuracy_score(y_test,y_pred))
print("F1 Score of Logistic Regression for sentence classifier is ",metrics.accuracy_score(y_test,y1_pred))
#########Testing with a new sentecnce
def ngram_generator(n,word):
i=0
n_grams=''
j=1
while(j<=n):
i=0
while(i<=len(word)-j):
n_grams+=word[i:i+j]+' '
i+=1
j+=1
return n_grams
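# Hedged worked example (not in the original script): ngram_generator emits every character
# n-gram of length 1..n, each followed by a space, e.g. for n=3 and the word "cat".
assert ngram_generator(3, "cat") == "c a t ca at cat "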
## The example below was run with the commented test sentence.
# Any sentence can be used, with the restriction that at most two languages are mixed and English is always the fixed mixing language.
# test_sentence = "apna time aayega"
test_sentence = input("Enter a sentence to predict its tags: ")
test_ngrams = ''
for item in test_sentence.split():
test_ngrams +=ngram_generator(5,item)
lis = []
lis.append(test_ngrams)
lis = pd.DataFrame(lis)
final = cv.transform(lis[0])
y_final = nb.predict(final) #Applying naive bayes to predict class label for input sentence.
y1_final = lg.predict(final)
label = y_final[0]
#print(y1_final)
print("For given sentence, predicted class label is ",classes[y_final[0]])
# Hence Naive Bayes Classifier and Logistic classifiers classified the given sentence
#with classlabel
#Now calling binary classifier
################Stage 2 : Word level classification.
# Forming individual dataframes from Vocabularies.
en_df = pd.read_csv('eng2.txt',header=None)
lis = []
for i in range(len(en_df)):
lis.append(1)
t = en_df[0]
en_df[0] = lis
en_df[1] = t
te_df = pd.read_csv('telugu.txt',header=None)
for i in range(len(te_df)):
te_df[0][i] = ngram_generator(5,te_df[0][i])
lis = []
for i in range(len(te_df)):
lis.append(8)
t = te_df[0]
te_df[0] = lis
te_df[1] = t
hi_df = pd.read_csv('hindiW.txt',header=None)
for i in range(len(hi_df)):
hi_df[0][i] = ngram_generator(5,hi_df[0][i])
lis = []
for i in range(len(hi_df)):
lis.append(3)
t = hi_df[0]
hi_df[0] = lis
hi_df[1] = t
ta_df = pd.read_csv('tamil.txt',header=None)
for i in range(len(ta_df)):
ta_df[0][i] = ngram_generator(5,ta_df[0][i])
lis = []
for i in range(len(ta_df)):
lis.append(7)
t = ta_df[0]
ta_df[0] = lis
ta_df[1] = t
#mr_df = pd.read_csv('maratiW.txt',header=None)
#for i in range(len(mr_df)):
# mr_df[0][i] = ngram_generator(5,mr_df[0][i])
kn_df = | pd.read_csv('kannadaW.txt',header=None) | pandas.read_csv |
import redis # import redis module
import pyarrow as pa
import pandas as pd
import time
import json
def init_conn(host='localhost', port=6379):
    # unused placeholder for a standalone connection helper; twitter_cache creates its own client below
    return
class twitter_cache():
def __init__(self,host='localhost',port=6379):
self.r = redis.Redis(host=host, port=port, decode_responses=False)
def add_cache(self,ns='twitter_cache:',key='', value=''):
self.r.set(ns+key,value)
return
def add_df_cache(self,ns='twitter_cache:',key='',df=''):
context = pa.default_serialization_context()
self.r.set(ns+key, context.serialize(df).to_buffer().to_pybytes())
def get_cache(self,ns='twitter_cache:',key=''):
return self.r.get(ns+key)
def get_df_cache(self,ns='twitter_cache:df:',key=''):
context = pa.default_serialization_context()
if type(key) is bytes:
get_key=key
else:
get_key=ns+key
if self.r.get(get_key):
return context.deserialize(self.r.get(get_key))
else:
return pd.DataFrame()
def get_rawcache(self,ns='twitter_cache:raw:',return_df=False):
keys= self.r.keys(pattern=ns+"*")
if not return_df:
return self.r.mget(keys)
else:
#construct a dataframe as return
rawdata=self.r.mget(keys)
rawdata=[x[1:-1].decode("utf-8") for x in rawdata]
rawdata='['+','.join(map(str, rawdata))+']'
return pd.read_json(rawdata,orient='records',dtype=True)
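    # Hedged usage note (names are illustrative): raw tweets cached under the
    # "twitter_cache:raw:" namespace can be fetched back as raw bytes or, with
    # return_df=True, re-assembled into a single DataFrame, e.g.
    #   df_raw = twitter_cache().get_rawcache(return_df=True)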
def clear_rawcache(self,ns='twitter_cache:raw:'):
keys= self.r.keys(pattern=ns+"*")
try:
self.r.delete(*keys)
except Exception as e:
print('Error in clearing raw data in cache with namespace {1}. Error Msg: {0}'.format(e,ns))
def get_allcache(self, df_only=False):
keys = self.r.keys('*')
out_dict = '['
#out_df = pd.DataFrame()
tmp_dict = '['
i= 0
for key in keys:
#decode bytes to Utf-8 from redis cache
# str_key=key.decode("utf-8")
tik = time.time()
str_type = self.r.type(key).decode("utf-8")
print('decode time: '+str(round(time.time()-tik,2)*1000000)+'us')
if str_type == "string":
if b"twitter_cache:df:" in key:
#val_tmp=self.get_df_cache(key=key)
#out_dict[key]=val_tmp
try:
# out_df = pd.concat([out_df,self.get_df_cache(key=key)],ignore_index=True)
##tmp_dict[i]=self.get_df_cache(key=key).to_dict('list')
#tmp_dict.append(self.get_df_cache(key=key).to_dict('index'))
tmp_dict=tmp_dict+self.get_df_cache(key=key).to_json(orient='records')[1:-1]+','
#print(pd.DataFrame.from_records(tmp_dict[i]))
i+=1
except Exception as e:
print('****ERROR****')
print('Error merge DataFrame from cache. {}'.format(e))
pass
else:
if not df_only:
#out_dict[key]=self.r.get(key)
#out_dict.append(self.r.get(key).decode("utf-8") )
tik=time.time()
out_dict=out_dict+self.r.get(key).decode("utf-8")[1:-1]+','
print('decode time: '+str(round(time.time()-tik,8)*1000)+'ms')
#out_dict+=str(self.r.get(key).decode("utf-8"))
#rint(pd.read_json(self.r.get(key).decode("utf-8")))
#if str_type == "hash":
# val = self.r.hgetall(key)
#if str_type == "zset":
# val = self.r.zrange(key, 0, -1)
#if str_type == "list":
# val = self.r.lrange(key, 0, -1)
#if str_type == "set":
# val = self.r.smembers(key)
#out_df=pd.DataFrame.from_dict(tmp_dict,'columns')
#print(tmp_dict)
#out_df=pd.DataFrame.from_records(tmp_dict)
out_dict = out_dict[:-1]+']'
if tmp_dict != '[':
tmp_dict=tmp_dict[:-1]+']'
return out_dict, pd.read_json(tmp_dict)
else:
return out_dict, None
def clean_allcache(self,ns='twitter_cache:',CHUNK_SIZE=5000):
cursor = '0'
ns_keys = ns + '*'
while cursor != 0:
cursor, keys = self.r.scan(cursor=cursor, match=ns_keys, count=CHUNK_SIZE)
if keys:
self.r.delete(*keys)
return True
if __name__ == "__main__":
t_cache = twitter_cache()
df=pd.DataFrame({'A':[1,2,3]})
df2= | pd.DataFrame({'A':[4,5,6]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 22:33:07 2018
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
import os
# set saving path
path_result_freq = "/home/bruce/Dropbox/Project/5.Result/5.Result_Nov/2.freq_domain/"
def correlation_matrix(corr_mx, cm_title):
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels = ['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels = ['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first,
    # otherwise the row-wise max comparison does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='binary')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
# fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_min_01_comb(corr_mx1 ,corr_mx2, cm_title1, cm_title2):
# find the minimum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first,
    # otherwise the row-wise min comparison does not work
temp = np.asarray(corr_mx1)
output1 = (temp == temp.min(axis=1)[:,None]) # along rows
temp = np.asarray(corr_mx2)
output2 = (temp == temp.min(axis=1)[:,None]) # along rows
fig, (ax1, ax2) = plt.subplots(1, 2)
# figure 1
im1 = ax1.matshow(output1, cmap='binary')
#fig.colorbar(im1, ax1)
ax1.grid(False)
ax1.set_title(cm_title1)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# figure 2
im2 = ax2.matshow(output2, cmap='binary')
#fig.colorbar(im2, ax2)
ax2.grid(False)
ax2.set_title(cm_title2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first,
    # otherwise the row-wise max comparison does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels, fontsize=6)
ax1.set_yticklabels(ylabels, fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first,
    # otherwise the row-wise max comparison does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# eg: plot_mag_db(df_as_85_vsc, 1, "Subject")
def fig_mag_db(signal_in, subject_number = 'subject_number', title = 'title', filename = 'filename'):
plt.figure()
plt.subplot(2,1,1)
plt.plot(signal_in.iloc[2*(subject_number-1), :48030], '-')
plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030], '-')
plt.ylabel('magnitude')
plt.legend(('Retest', 'Test'), loc='upper right')
plt.title(title)
# plt.subplot(2,1,2)
# plt.plot(signal_in.iloc[2*(subject_number-1), :48030].apply(f_dB), '-')
# plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030].apply(f_dB), '-')
# plt.xlabel('Frequency(Hz)')
# plt.ylabel('dB')
# plt.xlim(0,10000)
# plt.legend(('Retest', 'Test'), loc='lower right')
plt.show()
plt.savefig(filename)
# plot time domain signal in one figure
def fig_time_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 100, 0.09765625)
plt.plot(x_label, signal_in.iloc[2*i, :1024], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :1024], '-')
plt.ylabel(sub_title[i])
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
if i < 20:
plt.xticks([])
else:
plt.xlabel('Time (ms)')
plt.suptitle(title) # add a centered title to the figure
plt.show()
# plot frequency domain signal in one figure
def fig_mag_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1300)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
if i < 20:
plt.xticks([])
else:
plt.xlabel('Frequency(Hz)')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_test_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def fig_retest_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i+1, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def distance_mx(sig_in):
    # freq_range: number of frequency bins compared (13000 bins, roughly 0-1300 Hz at 0.1 Hz per bin)
freq_range = 13000
matrix_temp = np.zeros((22, 22))
matrix_temp_square = np.zeros((22, 22))
for i in range(22):
for j in range(22):
temp = np.asarray(sig_in.iloc[2*i, 0:freq_range] - sig_in.iloc[2*j+1, 0:freq_range])
temp_sum = 0
temp_square_sum = 0
for k in range(freq_range):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp[k])
temp_square_sum = temp_square_sum + (abs(temp[k]))**2
matrix_temp[i][j] = temp_sum
matrix_temp_square[i][j] = temp_square_sum
output_1 = pd.DataFrame(matrix_temp)
output_2 = pd.DataFrame(matrix_temp_square)
    # output_1 sums the absolute differences per frequency bin (for a complex x1+jy1, abs gives sqrt(x1**2 + y1**2))
    # output_2 sums the squared absolute differences per bin (x1**2 + y1**2), i.e. a squared Euclidean distance
return output_1, output_2
def complex_coherence_mx(input_signal):
# compute the magnitude squared coherence based on signal.coherence
# then create the matrix with values
# higher value -> better coherence value
sig_in = input_signal.copy()
matrix_temp = np.zeros((22, 22))
for i in range(22):
for j in range(22):
            # temp_sum accumulates the magnitude-squared coherence across frequency bins
temp_sum = 0
sig_in_1 = np.array(sig_in.iloc[2*i, :])
sig_in_2 = np.array(sig_in.iloc[2*j+1, :])
# signal 9606Hz length 106.6ms window length 10ms -> nperseg=96
f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs=9606, nperseg=96)
# delete values lower than 0.01
for l in range(len(temp_Cxy)):
if temp_Cxy[l] < 0.1:
temp_Cxy[l] = 0
# delete finish
# test
'''
if i ==0 and j == 0:
plt.figure()
plt.semilogy(f, temp_Cxy)
plt.title("test in complex_coherence_mx")
plt.show()
'''
# test finish
for k in range(len(temp_Cxy)):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp_Cxy[k])
matrix_temp[i][j] = temp_sum
output_3 = pd.DataFrame(matrix_temp)
return output_3
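# Hedged usage note: complex_coherence_mx assumes a 44-row input frame with the test/retest
# recordings of the 22 subjects interleaved (rows 2*i and 2*j+1), e.g.
#   coh_mx = complex_coherence_mx(df_EFR_avg_win)   # df_EFR_avg_win is a hypothetical name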
def fig_coherence_in_1(signal_in, threshold_Cxy = None, title = 'title', title2 = 'title2'):
# threshold_Cxy is used for setting minimum value
Cxy_sum = pd.DataFrame()
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
sig_in_1 = signal_in.iloc[i, :]
sig_in_2 = signal_in.iloc[i+22, :]
# signal 9606Hz length 106.6ms window length 10ms -> nperseg=96
# no zero padding
# f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs=9606, nperseg=128)
# with zero padding
f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs = 9606, nperseg=512, nfft=19210)
# print("shape of temp_Cxy is")
# print (temp_Cxy.shape)
# delete value lower than 0.05
if (threshold_Cxy != None):
for l in range(len(temp_Cxy)):
if temp_Cxy[l] < threshold_Cxy:
temp_Cxy[l] = 0
# delete finish
Cxy_sum = Cxy_sum.append(pd.DataFrame(np.reshape(temp_Cxy, (1,9606))), ignore_index=True)
plt.subplot(11,2,i+1)
plt.plot(f, temp_Cxy)
plt.ylabel(sub_title[i])
plt.xlim(0,2000)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
    # plot average of 22 subjects
plt.figure()
plt.subplot(1,1,1)
Cxy_avg = Cxy_sum.mean(axis=0)
plt.plot(f, Cxy_avg)
plt.title('average of 22 subjects based on '+ title2)
plt.xlim(0,2000)
plt.show()
#################################
f_dB = lambda x : 20 * np.log10(np.abs(x))
# import the pkl file
# for linux
df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')
# for mac
# df_EFR=pd.read_pickle('/Users/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
    df_EFR_detrend = pd.concat([df_EFR_detrend, pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True)], ignore_index=True)
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Time domain
# Define window function
win_kaiser = signal.windows.kaiser(1024, beta=14)
win_hamming = signal.windows.hamming(1024)
# average the df_EFR
df_EFR_avg = pd.DataFrame()
import numpy as np; import pandas as pd
from pyg_timeseries._math import stdev_calculation_ewm, skew_calculation, cor_calculation_ewm, covariance_calculation, corr_calculation_ewm, LR_calculation_ewm, variance_calculation_ewm, _w
from pyg_timeseries._decorators import compiled, first_, _data_state
from pyg_base import pd2np, clock, loop_all, loop, is_pd, is_df, presync, df_concat
__all__ = ['ewma', 'ewmstd', 'ewmvar', 'ewmskew', 'ewmrms', 'ewmcor', 'ewmcorr', 'ewmLR', 'ewmGLM',
'ewma_', 'ewmstd_', 'ewmskew_', 'ewmrms_', 'ewmcor_', 'ewmvar_','ewmLR_', 'ewmGLM_',]
############################################
##
## compiled functions, unfortunately, both these methods are much slower
##
###########################################
# import numba
# from numba import int32, float32 # import the types
# from numba.experimental import jitclass
# spec = [
# ('t0', float32), # a simple scalar field
# ('t1', float32), # a simple scalar field
# ('t', float32), # a simple scalar field
# ('a', float32), # a simple scalar field
# ('w', float32), # a simple scalar field
# ]
# @jitclass(spec)
# class c_ewma(object):
# def __init__(self, a, t, t0, t1, w):
# self.a = a
# self.t = t
# self.t0 = t0
# self.t1 = t1
# self.w = w
# def push(self, a, t):
# if np.isnan(a):
# return np.nan
# if t == self.t:
# self.t1 = self.t1 + (1-self.w) * (a - self.a)
# return self.t1/self.t0
# else:
# p = self.w if np.isnan(t) else self.w**(t-self.t)
# self.t0 = self.t0 * p + (1-self.w)
# self.t1 = self.t1 * p + (1-self.w) * a
# return self.t1/self.t0
# @compiled
# def _ewma_(ai, ti, ai0, ti0, t0, t1, w):
# """
# we receive
# - current values, (ai, ti)
# - previous values (ai0, ti0)
# - current state of the moments t0, t1
# - parameters, w
# We return:
# result, current values, updated moments
# res, a, t, t0, t1
# """
# if np.isnan(ai):
# res = np.nan
# return res, ai0, ti0, t0, t1
# else:
# if ti == ti0:
# t1 = t1 + (1-w) * (ai - ai0)
# res = t1/t0
# return res, ai, ti, t0, t1
# else:
# p = w**(ti-ti0)
# t0 = t0 * p + (1-w)
# t1 = t1 * p + (1-w) * ai
# res = t1/t0
# return res, ai, ti, t0, t1
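# The compiled kernels below keep running sums rather than the full history:
# t0 is the exponentially-decayed count of observations and t1 (or t2) the
# exponentially-decayed sum of values (or squared values), so the EWMA at any
# point is simply t1/t0 (and the RMS is sqrt(t2/t0)). Carrying (t, t0, t1, ...)
# as state lets the functions resume incrementally on new data.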
@pd2np
@compiled
def _ewma(a, n, time, t = np.nan, t0 = 0, t1 = 0):
if n == 1:
return a, t, t0, t1
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
res[i] = np.nan
else:
if time[i] == t:
t1 = t1 + (1-w) * (a[i] - a[i0])
else:
p = w if np.isnan(time[i]) else w**(time[i]-t)
t0 = t0 * p + (1-w)
t1 = t1 * p + (1-w) * a[i]
t = time[i]
i0 = i
res[i] = np.nan if t0 == 0 else t1/t0
return res, t, t0, t1
@pd2np
@compiled
def _ewmrms(a, n, time, t = np.nan, t0 = 0., t2 = 0.):
if n == 1:
return a, t, t0, t2
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
res[i] = np.nan
else:
if time[i] == t:
t2 = t2 + (1-w) * (a[i]**2 - a[i0]**2)
else:
p = w if np.isnan(time[i]) else w**(time[i]-t)
v = a[i]
t0 = t0 * p + (1-w)
t2 = t2 * p + (1-w) * v**2
t = time[i]
i0 = i
res[i] = np.nan if t0 == 0 else np.sqrt(t2/t0)
return res, t, t0, t2
@pd2np
@compiled
def _ewmstd(a, n, time, t = np.nan, t0 = 0, t1 = 0, t2 = 0, w2 = 0, min_sample = 0.25, bias = False, calculator = stdev_calculation_ewm):
if n == 1:
return np.full_like(a, 0.0), t, t0, t1, t2, w2
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
res[i] = np.nan
else:
if time[i] == t:
t1 = t1 + (1-w) * (a[i] - a[i0])
t2 = t2 + (1-w) * (a[i]**2 - a[i0]**2)
else:
p = w if np.isnan(time[i]-t) else w**(time[i]-t)
v = a[i]
t0 = t0 * p + (1-w)
w2 = w2 * p**2 + (1-w)**2
t1 = t1 * p + (1-w) * v
t2 = t2 * p + (1-w) * v**2
t = time[i]
i0 = i
res[i] = calculator(t0, t1, t2, w2 = w2, min_sample = min_sample, bias = bias)
return res, t, t0, t1, t2, w2
@pd2np
@compiled
def _ewmcor(a, b, ba, n, time, t = np.nan, t0 = 0, a1 = 0, a2 = 0, b1 = 0, b2 = 0, ab = 0, w2 = 0, min_sample = 0.25, bias = False):
"""
_ewmcor(a, b, ba, n, time, t)
n = 50
t = np.nan; t0 = 0; a1 = 0; a2 = 0; b1 = 0; b2 = 0; ab = 0; w2 = 0; min_sample = 0.25; bias = False
data, t, t0, a1, a2, b1, b2, ab, w2 = _ewmcor(a, b, ba, 200, time, t = np.nan, t0 = 0, a1 = 0, a2 = 0, b1 = 0, b2 = 0, ab = 0, w2 = 0, min_sample = 0.25, bias = False)
pd.Series(data, drange(-9999)).plot()
"""
if n == 1:
return np.full_like(a, np.nan), t, t0, a1, a2, b1, b2, ab, w2
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]) or np.isnan(b[i]):
res[i] = np.nan
else:
if time[i] == t:
a1 = a1 + (1-w) * (a[i] - a[i0])
a2 = a2 + (1-w) * (a[i]**2 - a[i0]**2)
b1 = b1 + (1-w) * (b[i] - b[i0])
b2 = b2 + (1-w) * (b[i]**2 - b[i0]**2)
ab = ab + (1-w) * (ba[i] - ba[i0])
else:
p = w if np.isnan(time[i]) else w**(time[i]-t)
t0 = t0 * p + (1-w)
w2 = w2 * p**2 + (1-w)**2
a1 = a1 * p + (1-w) * a[i]
a2 = a2 * p + (1-w) * a[i]**2
b1 = b1 * p + (1-w) * b[i]
b2 = b2 * p + (1-w) * b[i]**2
ab = ab * p + (1-w) * ba[i]
t = time[i]
i0 = i
res[i0] = cor_calculation_ewm(t0 = t0, a1 = a1, a2 = a2, b1 = b1, b2 = b2, ab = ab, w2 = w2, min_sample = min_sample, bias = bias)
return res, t, t0, a1, a2, b1, b2, ab, w2
@compiled
def _ewmcorr(a, n, a0 = None, a1 = None, a2 = None, aa0 = None, aa1 = None, w2 = None, min_sample = 0.25, bias = False):
"""
"""
m = a.shape[1]
if n == 1:
return np.full((a.shape[0], m, m), np.nan), a0, a1, a2, aa0, aa1, w2
p = w = _w(n)
v = 1 - w
res = np.zeros((a.shape[0], m, m))
a0 = np.zeros(m) if a0 is None else a0
a1 = np.zeros(m) if a1 is None else a1
a2 = np.zeros(m) if a2 is None else a2
aa1 = np.zeros((m,m)) if aa1 is None else aa1
aa0 = np.zeros((m,m)) if aa0 is None else aa0
w2 = np.zeros(m) if w2 is None else w2
for i in range(a.shape[0]):
for j in range(m):
if ~np.isnan(a[i,j]):
w2[j] = w2[j] * p**2 + v**2
a0[j] = a0[j] * p + v
a1[j] = a1[j] * p + v * a[i,j]
a2[j] = a2[j] * p + v * a[i,j] ** 2
for j in range(m):
res[i, j, j] = 1.
if np.isnan(a[i,j]):
res[i, j, :] = np.nan #if i == 0 else res[i-1, j, :] # we ffill correlations
res[i, :, j] = np.nan #if i == 0 else res[i-1, :, j]
else:
for k in range(j):
if ~np.isnan(a[i,k]):
aa0[j,k] = aa0[j,k] * p + v
aa1[j,k] = aa1[j,k] * p + v * a[i, j] * a[i, k]
res[i, k, j] = res[i, j, k] = corr_calculation_ewm(a0 = a0[j], a1 = a1[j], a2 = a2[j], aw2 = w2[j],
b0 = a0[k], b1 = a1[k], b2 = a2[k], bw2 = w2[k],
ab = aa1[j,k], ab0 = aa0[j,k],
min_sample = min_sample, bias = bias)
return res, a0, a1, a2, aa0, aa1, w2
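# a0/a1/a2 hold per-column decayed counts, sums and sums of squares, while
# aa0/aa1 hold the pairwise decayed counts and cross-products used for the
# off-diagonal correlation terms; only observations where both columns are
# non-nan update the pairwise state.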
@compiled
def _ewmcovar(a, n, a0 = None, a1 = None, aa0 = None, aa1 = None, min_sample = 0.25, bias = False):
"""
"""
m = a.shape[1]
if n == 1:
return np.full((a.shape[0], m, m), np.nan), a0, a1, aa0, aa1
p = w = _w(n)
v = 1 - w
res = np.zeros((a.shape[0], m, m))
a0 = np.zeros(m) if a0 is None else a0
a1 = np.zeros(m) if a1 is None else a1
aa1 = np.zeros((m,m)) if aa1 is None else aa1
aa0 = np.zeros((m,m)) if aa0 is None else aa0
for i in range(a.shape[0]):
for j in range(m):
if ~np.isnan(a[i,j]):
a0[j] = a0[j] * p + v
a1[j] = a1[j] * p + v * a[i,j]
for j in range(m):
if np.isnan(a[i,j]):
res[i, j, :] = np.nan #if i == 0 else res[i-1, j, :] # we ffill correlations
res[i, :, j] = np.nan #if i == 0 else res[i-1, :, j]
else:
for k in range(j+1):
if ~np.isnan(a[i,k]):
aa0[j,k] = aa0[j,k] * p + v
aa1[j,k] = aa1[j,k] * p + v * a[i, j] * a[i, k]
res[i, k, j] = res[i, j, k] = covariance_calculation(a0 = a0[j], a1 = a1[j],
b0 = a0[k], b1 = a1[k],
ab = aa1[j,k], ab0 = aa0[j,k],
min_sample = min_sample, bias = bias)
return res, a0, a1, aa0, aa1
def ewmcovar_(a, n, min_sample = 0.25, bias = False, instate = None, join = 'outer', method = None):
"""
    This calculates a full covariance matrix as a timeseries. Also returns the recent state of the calculations.
:Returns:
---------
a dict with:
- data: t x n x n covariance matrix
- index: timeseries index
- columns: columns of original data
    See ewmcovar below for parameter details.
"""
state = {} if instate is None else instate
arr = df_concat(a, join = join, method = method)
if isinstance(arr, np.ndarray):
res, a0, a1, aa0, aa1 = _ewmcovar(arr, n, min_sample = min_sample, bias = bias, **state)
if res.shape[1] == 2:
res = res[:, 0, 1]
return dict(data = res, columns = None, index = None, state = dict(a0=a0, a1=a1, aa0=aa0, aa1=aa1))
elif is_df(arr):
index = arr.index
columns = list(arr.columns)
res, a0, a1, aa0, aa1 = _ewmcovar(arr.values, n, min_sample = min_sample, bias = bias, **state)
state = dict(a0=a0, a1=a1, aa0=aa0, aa1=aa1)
return dict(data = res, columns = columns, index = index, state = state)
else:
        raise ValueError('unsure how to calculate covariance matrix for a %s'%a)
ewmcovar_.output = ['data', 'columns', 'index', 'state']
def ewmcovar(a, n, min_sample = 0.25, bias = False, instate = None, join = 'outer', method = None):
"""
This calculates a full covariance matrix as a timeseries.
:Parameters:
----------
a : np.array or a pd.DataFrame
multi-variable timeseries to calculate correlation for
n : int
days for which rolling correlation is calculated.
min_sample : float, optional
Minimum observations needed before we calculate correlation. The default is 0.25.
bias : bool, optional
input to stdev calculations, the default is False.
instate : dict, optional
historical calculations so far.
:Returns:
-------
covariance (as t x n x n np.array)
:Example: a pair of ts
---------
>>> a = pd.DataFrame(np.random.normal(0,3,(10000,10)), drange(-9999))
>>> res = ewmcovar(a, 250)
>>> # We first check that diagonal is indeed the (biased) variance of the variables:
>>> ratio = pd.Series([res[-1][i,i] for i in range(10)]) / ewmvar(a, 250, bias=True).iloc[-1]
>>> assert ratio.max() < 1.0001 and ratio.min() > 0.9999
To access individually, here we calculate the correlation between 0th and 1st timeseries. That correlation is close to 0 (Fisher distribution) so...
>>> cor = pd.Series(res[:,0,1] / np.sqrt(res[:,0,0] * res[:,1,1]), a.index)
>>> cor.plot()
>>> assert cor.max() < 0.3 and cor.min() > -0.3
"""
return ewmcovar_(a, n, min_sample = min_sample, bias = bias, instate = instate , join = join, method = method).get('data')
def ewmcorr_(a, n, min_sample = 0.25, bias = False, instate = None, join = 'outer', method = None):
"""
This calculates a full correlation matrix as a timeseries. Also returns the recent state of the calculations.
See ewmcorr for full details.
"""
state = {} if instate is None else instate
arr = df_concat(a, join = join, method = method)
if isinstance(arr, np.ndarray):
res, a0, a1, a2, aa0, aa1, w2 = _ewmcorr(arr, n, min_sample = min_sample, bias = bias, **state)
if res.shape[1] == 2:
res = res[:, 0, 1]
return dict(data = res, state = dict(a0=a0, a1=a1, a2=a2, aa0=aa0, aa1=aa1, w2 = w2))
elif is_df(arr):
index = arr.index
columns = list(arr.columns)
res, a0, a1, a2, aa0, aa1, w2 = _ewmcorr(arr.values, n, min_sample = min_sample, bias = bias, **state)
state = dict(a0=a0, a1=a1, a2=a2, aa0=aa0, aa1=aa1, w2 = w2)
if arr.shape[1] == 2:
            res = pd.Series(res[:,0,1], index)
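        # Assumed continuation (the source appears truncated here), mirroring the
        # DataFrame branch of ewmcovar_ above:
        return dict(data = res, columns = columns, index = index, state = state)
    else:
        raise ValueError('unsure how to calculate correlation matrix for a %s'%a)
ewmcorr_.output = ['data', 'columns', 'index', 'state']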
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 21:11:50 2020
@author: huiyeon
"""
###################
# Try running RNN #
###################
# import package ------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
font_name = font_manager.FontProperties(fname="C:/Windows/Fonts/KOPUBDOTUMMEDIUM.TTF").get_name()
rc('font', family=font_name)
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# load train data ------------------------------
data = pd.read_csv('after_spacing.csv')
data.head()
# ids whose text was removed during preprocessing
data[data['after_spacing'].isnull()==True]['id']
np.sum(pd.isnull(data))
null_data = data[data['after_spacing'].isnull()==True]
del data['Unnamed: 0']
train = data[['id', 'after_spacing', 'smishing']]
train.head()
# drop null values
train.dropna(axis=0, inplace=True)
train.reset_index(inplace=True)
del train['index']
np.sum(pd.isnull(train))
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.widgets import CheckButtons
import utils
def visualize(file_name):
# Enter CSV to process:
    word_count = pd.read_csv(file_name)
import argparse
import configparser
import json
import numpy as np
import os
import pandas as pd
from pathlib import Path
import skimage.io
import skimage.transform
import sys
import time
import torch
import torch.utils.data
import torchvision
from tqdm import tqdm
sys.path.insert(1, '/home/xview3/src') # use an appropriate path if not in the docker volume
from xview3.processing.constants import FISHING, NONFISHING, PIX_TO_M
from xview3.eval.prune import nms, confidence_pruning
from xview3.postprocess.v2.model_simple import Model
import xview3.models
from xview3.utils import clip
import xview3.transforms
import xview3.eval.ensemble
# Map from channel names to filenames.
channel_map = {
'vv': 'VV_dB.tif',
'vh': 'VH_dB.tif',
'bathymetry': 'bathymetry.tif',
'wind_speed': 'owiWindSpeed.tif',
'wind_quality': 'owiWindQuality.tif',
'wind_direction': 'owiWindDirection.tif',
}
def center(coord):
return (coord[0] + (coord[2] / 2), coord[1] + (coord[3] / 2))
class SceneDataset(object):
def __init__(self, image_folder, scene_ids, channels):
self.image_folder = image_folder
self.scene_ids = scene_ids
self.channels = channels
def __len__(self):
return len(self.scene_ids)
def __getitem__(self, idx):
scene_id = self.scene_ids[idx]
# Load scene channels.
# We always load bathymetry so we can eliminate detections on land.
def get_channel(channel, shape=None):
path = os.path.join(self.image_folder, scene_id, channel_map[channel])
print(scene_id, 'read', path)
cur = skimage.io.imread(path)
cur = torch.tensor(cur, dtype=torch.float32)
# If not same size as first channel, resample before chipping
# to ensure chips from different channels are co-registered
if shape is not None and cur.shape != shape:
cur = torchvision.transforms.functional.resize(img=cur.unsqueeze(0), size=shape)[0, :, :]
return cur
im_channels = [get_channel(self.channels[0])] # nb this precludes vv/vh being first channel
for channel in self.channels[1:]:
if channel == "vv_over_vh":
vvovervh = get_channel("vv", shape=im_channels[0].shape) / get_channel("vh", shape=im_channels[0].shape)
vvovervh = np.nan_to_num(vvovervh, nan=0, posinf=0, neginf=0)
im_channels.append(torch.tensor(vvovervh, dtype=torch.float32))
else:
im_channels.append(get_channel(channel, shape=im_channels[0].shape))
# Stack channels.
im = torch.stack(im_channels, dim=0)
print(scene_id, 'done reading')
return scene_id, im
def process_scene(args, clip_boxes, bbox_size, device, weight_files, model, postprocess_model, detector_transforms, postprocess_transforms, scene_id, im):
with torch.no_grad():
if im.shape[1] < args.window_size or im.shape[2] < args.window_size:
raise Exception('image for scene {} is smaller than window size'.format(scene_id))
# Outputs for each member of the ensemble/test-time-augmentation.
member_outputs = []
member_infos = [(0, 0, False, False)]
#member_infos = [(0, 0, False, False), (0, 0, True, False), (757, 757, False, False), (1515, 1515, True, False)]
#member_infos = [(0, 0, False, False), (757, 757, True, False)]
#member_infos = [(0, 0, False, False), (0, 0, True, False), (0, 0, False, True), (0, 0, True, True)]
for weight_file in weight_files:
model.load_state_dict(torch.load(weight_file, map_location=device))
for member_idx, (args_row_offset, args_col_offset, args_fliplr, args_flipud) in enumerate(member_infos):
predicted_points = []
# Loop over windows.
row_offsets = [0] + list(range(
args.window_size-2*args.padding - args_row_offset,
im.shape[1]-args.window_size,
args.window_size-2*args.padding,
)) + [im.shape[1]-args.window_size]
col_offsets = [0] + list(range(
args.window_size-2*args.padding - args_col_offset,
im.shape[2]-args.window_size,
args.window_size-2*args.padding,
)) + [im.shape[2]-args.window_size]
member_start_time = time.time()
for row_offset in row_offsets:
print('{} [{}/{}] (elapsed={})'.format(scene_id, row_offset, row_offsets[-1], time.time()-member_start_time))
for col_offset in col_offsets:
crop = im[:, row_offset:row_offset+args.window_size, col_offset:col_offset+args.window_size]
crop = torch.clone(crop)
crop, _ = detector_transforms(crop, None)
crop = crop[0:2, :, :]
if args_fliplr:
crop = torch.flip(crop, dims=[2])
if args_flipud:
crop = torch.flip(crop, dims=[1])
crop = crop.to(device)
output = model([crop])[0]
output = {k: v.to("cpu") for k, v in output.items()}
# Only keep output detections that are within bounds based
# on window size and padding.
keep_bounds = [
args.padding,
args.padding,
args.window_size - args.padding,
args.window_size - args.padding,
]
if row_offset == 0:
keep_bounds[0] = 0
if col_offset == 0:
keep_bounds[1] = 0
if row_offset >= im.shape[1] - args.window_size:
keep_bounds[2] = args.window_size
if col_offset >= im.shape[2] - args.window_size:
keep_bounds[3] = args.window_size
keep_bounds[0] -= args.overlap
keep_bounds[1] -= args.overlap
keep_bounds[2] += args.overlap
keep_bounds[3] += args.overlap
for idx, box in enumerate(output["boxes"]):
# Determine the predicted point, in transformed image coordinates.
if clip_boxes:
# Boxes on edges of image might not be the right size.
if box[0] < bbox_size:
pred_col = int(box[2] - bbox_size)
elif box[2] >= crop.shape[2]-bbox_size:
pred_col = int(box[0] + bbox_size)
else:
pred_col = int(np.mean([box[0], box[2]]))
if box[1] < bbox_size:
pred_row = int(box[3] - bbox_size)
elif box[3] >= crop.shape[1]-bbox_size:
pred_row = int(box[1] + bbox_size)
else:
pred_row = int(np.mean([box[1], box[3]]))
else:
pred_row = int(np.mean([box[1], box[3]]))
pred_col = int(np.mean([box[0], box[2]]))
# Undo any transformations.
if args_fliplr:
pred_col = crop.shape[2] - pred_col
if args_flipud:
pred_row = crop.shape[1] - pred_row
# Compare against keep_bounds, which is pre-transformation.
if pred_row < keep_bounds[0] or pred_row >= keep_bounds[2]:
continue
if pred_col < keep_bounds[1] or pred_col >= keep_bounds[3]:
continue
label = output["labels"][idx].item()
is_fishing = label == FISHING
is_vessel = label in [FISHING, NONFISHING]
if "lengths" in output:
length = output["lengths"][idx].item()
else:
length = 0
score = output["scores"][idx].item()
scene_pred_row = row_offset + pred_row
scene_pred_col = col_offset + pred_col
predicted_points.append([
scene_pred_row,
scene_pred_col,
scene_id,
is_vessel,
is_fishing,
length,
score,
])
member_pred = pd.DataFrame(
data=predicted_points,
columns=(
"detect_scene_row",
"detect_scene_column",
"scene_id",
"is_vessel",
"is_fishing",
"vessel_length_m",
"score",
),
)
print("[ensemble-member {}] {} detections found".format(member_idx, len(member_pred)))
member_outputs.append(member_pred)
# Merge ensemble members into one dataframe.
pred = xview3.eval.ensemble.merge(member_outputs)
# Pruning Code
if args.nms_thresh is not None:
pred = nms(pred, distance_thresh=args.nms_thresh)
if args.conf is not None:
pred = confidence_pruning(pred, threshold=args.conf)
# Postprocessing Code
bs = 32
crop_size = 128
    pred = pred.reset_index(drop=True)
    elim_inds = []  # indices of detections pruned as confidence=LOW by the postprocess model
    for x in range(0, len(pred), bs):
batch_df = pred.iloc[x : min((x+bs), len(pred))]
crops, indices = [], []
for idx,b in batch_df.iterrows():
indices.append(idx)
row, col = b['detect_scene_row'], b['detect_scene_column']
crop = im[:, row-crop_size//2:row+crop_size//2, col-crop_size//2:col+crop_size//2]
crop = torch.clone(crop)
crop, _ = postprocess_transforms(crop, None)
crop = crop[:, 8:120, 8:120]
crop = torch.nn.functional.pad(crop, (8, 8, 8, 8))
crops.append(crop)
t = postprocess_model(torch.stack(crops, dim=0).to(device))
t = [tt.cpu() for tt in t]
pred_length, pred_confidence, pred_correct, pred_source, pred_fishing, pred_vessel = t
for i in range(len(indices)):
index = x + i
# Prune confidence=LOW.
if pred_confidence[i, :].argmax() == 0 and args.mode == 'full':
elim_inds.append(index)
continue
pred.loc[index, 'vessel_length_m'] = pred_length[i].item()
if args.mode in ['full', 'attribute']:
pred.loc[index, 'fishing_score'] = pred_fishing[i, 1].item()
pred.loc[index, 'vessel_score'] = pred_vessel[i, 1].item()
pred.loc[index, 'low_score'] = pred_confidence[i, 0].item()
pred.loc[index, 'is_fishing'] = (pred_fishing[i, 1] > 0.5).item() & (pred_vessel[i, 1] > 0.5).item()
pred.loc[index, 'is_vessel'] = (pred_vessel[i, 1] > 0.5).item()
pred.loc[index, 'correct_score'] = pred_correct[i, 1].item()
if args.mode == 'full':
pred.loc[index, 'score'] = pred_correct[i, 1].item()
    pred = pred.drop(index=elim_inds).reset_index(drop=True)
    if args.drop_cols:
good_columns = [
'detect_scene_row',
'detect_scene_column',
'scene_id',
'is_vessel',
'is_fishing',
'vessel_length_m',
]
bad_columns = []
for column_name in pred.columns:
if column_name in good_columns:
continue
bad_columns.append(column_name)
pred = pred.drop(columns=bad_columns)
if args.vessels_only:
pred = pred[pred.is_vessel == True]
if args.save_crops:
pred = pred.reset_index(drop=True)
detect_ids = [None]*len(pred)
for index, label in pred.iterrows():
row, col = label['detect_scene_row'], label['detect_scene_column']
vh = im[0, row-crop_size//2:row+crop_size//2, col-crop_size//2:col+crop_size//2].numpy()
vv = im[1, row-crop_size//2:row+crop_size//2, col-crop_size//2:col+crop_size//2].numpy()
vh = np.clip((vh+50)*255/70, 0, 255).astype('uint8')
vv = np.clip((vv+50)*255/70, 0, 255).astype('uint8')
detect_id = '{}_{}'.format(scene_id, index)
detect_ids[index] = detect_id
skimage.io.imsave(os.path.join(args.output, '{}_vh.png'.format(detect_id)), vh)
skimage.io.imsave(os.path.join(args.output, '{}_vv.png'.format(detect_id)), vv)
pred.insert(len(pred.columns), 'detect_id', detect_ids)
return pred
def main(args, config):
    # Create the output directory if it does not already exist
Path(os.path.split(args.output)[0]).mkdir(parents=True, exist_ok=True)
# os.environ["CUDA_VISIBLE_DEVICES"]="3"
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
channels = ['vh', 'vv', 'bathymetry']
model_name = config.get("training", "Model")
transform_names = config.get("data", "Transforms").split(",")
clip_boxes = config.getboolean("data", "ClipBoxes", fallback=False)
bbox_size = config.getint("data", "BboxSize", fallback=5)
if args.scene_id and args.scene_id != 'all':
scene_ids = args.scene_id.split(',')
else:
scene_ids = os.listdir(args.image_folder)
transform_info = {
'channels': channels,
'bbox_size': bbox_size,
}
detector_transforms = xview3.transforms.get_transforms(transform_names, transform_info)
postprocess_transforms = xview3.transforms.get_transforms(['CustomNormalize3'], transform_info)
dataset = SceneDataset(
image_folder=args.image_folder,
scene_ids=scene_ids,
channels=channels,
)
model_cls = xview3.models.models[model_name]
model = model_cls(
num_classes=4,
num_channels=len(channels),
image_size=args.window_size,
device=device,
config=config["training"],
disable_multihead=True,
)
weight_files = args.weights.split(',')
model.load_state_dict(torch.load(weight_files[0], map_location=device))
model.to(device)
model.eval()
postprocess_model = Model()
postprocess_model.load_state_dict(torch.load(args.postprocess_weights))
postprocess_model.eval()
postprocess_model.to(device)
preds = []
for scene_id, im in dataset:
print('processing scene', scene_id)
preds.append(process_scene(args, clip_boxes, bbox_size, device, weight_files, model, postprocess_model, detector_transforms, postprocess_transforms, scene_id, im))
    pred = pd.concat(preds)
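    # Assumed final step (the source appears to continue beyond this point):
    # persist the combined predictions, treating args.output as the CSV path.
    pred.to_csv(args.output, index=False)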
#!/usr/bin/env python
######################################################################################
# AUTHOR: <NAME> <<EMAIL>>
# CONTRIBUTORS: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# DESCRIPTION: gaitGM module with functions for KEGG PEA Tools
#######################################################################################
import re
import sys
import csv
import requests
import logging
import tempfile
from difflib import SequenceMatcher
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.stats as st
from rpy2 import robjects as robjects
from rpy2.robjects.conversion import localconverter
from rpy2.robjects import pandas2ri
from matplotlib.backends.backend_pdf import PdfPages
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage as STAP
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
from secimtools.visualManager.module_mmc import expansion, get_clustering
from importlib import resources as ires
def checkForDuplicates(dataset, uniqID):
"""
Check for duplicated values in a dataset column. If found, terminate the program and provide
information to stderr
Arguments:
:param dataset: Input dataset to check if uniqID column is unique
:type dataset: file
:param uniqID: Unique Identifier Column Name
:type uniqID: string
"""
table = pd.read_table(dataset, sep="\t", header=0)
# In case of a Selected Yes/No Column:
if "Selected" in table.columns:
table = table.loc[table["Selected"] != "No"]
ids = table[uniqID]
duplicates = table[ids.isin(ids[ids.duplicated()])][uniqID]
if not duplicates.empty:
pd.Series.__unicode__ = pd.Series.to_string
sys.stderr.write(
"Duplicated IDs!\n\nRow_Number\tRepeated_ID\n" + str(duplicates)
)
sys.exit(1)
def downloadGeneParser(species):
"""
Add KEGG Annotation Info (kegg_id, gene_name) from KEGG database.
Arguments:
:param species: species identifier in kegg to download
:type species: string
Returns:
:return gene2keggsArray: Dictionary with KEGG information about gene name and identifier
:rtype gene2keggsArray: Dictionary
"""
gene2keggsArray = {}
with requests.get("http://rest.kegg.jp/list/" + species) as genes:
gene2keggs = genes.content.splitlines()
for line in gene2keggs:
line = line.decode('utf-8')
geneId = line.split("\t")[0]
geneNames = line.split("\t")[1]
gene2keggsArray[geneId] = geneNames
return gene2keggsArray
def downloadMetParser():
"""
Download metabolite information (kegg_id, cpd_name) from KEGG database.
Returns:
:return met2keggsArray: Dictionary with KEGG information about compound name and identifier
:rtype met2keggsArray: dictionary
"""
met2keggsArray = {}
with requests.get("http://rest.kegg.jp/list/compound") as metabolites:
met2keggs = metabolites.content.splitlines()
for line in met2keggs:
line = line.decode('utf-8')
cpdId = line.split("\t")[0]
cpdNames = line.split("\t")[1]
met2keggsArray[cpdId] = cpdNames
return met2keggsArray
def keggAnno(feature_table, feature2keggs, featureOut, uniqIDNameCol, featureNameCol, featureType):
"""
Takes the Name column of a Dataset and find KEGG-related information.
It works for Gene Expression and Metabolomic data (depending on featureType parameter).
It will return an Annotation Dataset with this information and list possible ties.
Arguments:
:param feature_table: Table that contains at least a column with a Unique Identifier and
another one with the feature name.
:type feature_table: pandas dataset
:param feature2keggs: Dictionary created with KEGG Information: {KEGG_ID: FeatureName}
:type feature2keggs: dictionary
:param featureOut: Initializated output table with this information: UniqId FeatureName
Feature_Type Matched Name_in_KEGG KEGG_ID Similarity Tie Selected
:type featureOut: file
:param uniqIDNameCol: Name of the column with Unique Identifiers.
:type uniqIDNameCol: string
:param featureNameCol: Name of the column with feature names.
:type featureNameCol: string
:param featureType: One of: 'Gene' or 'Metabolite'.
:type featureType: string
Returns:
:return featureOut: Finished output table
:rtype featureOut: File
"""
tmpList_v1 = []
tmpList_v2 = []
emptyList = ["nan", "", "na"]
for index, featureRow in feature_table.iterrows():
uniqueID = str(featureRow[uniqIDNameCol]).strip()
featureName = str(featureRow[featureNameCol]).strip()
featureName = featureName.replace("\t", "_")
if (featureType == "Metabolite") and (featureName not in emptyList):
newMetNames = metaboliteModification(featureName)
# To not match NaN to KEGG
if featureName.lower() in emptyList:
tmpList_v1.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\tNA\tNA\tNA\tNA\tNA\tNA\n"
)
else:
featureDict_v1 = {}
# metabolite dict: {keggMetName1: [similarity1, kegg_cpd1], keggMetName2: [similarity2,
# kegg_cpd2], ...}
for kegg_id, keggFeatureNames in feature2keggs.items():
if featureType == "Gene":
keggGeneNames = keggFeatureNames.split(";")[0].split(",")
for keggGeneName in keggGeneNames:
if re.search(
".*" + re.escape(str(featureName)) + ".*",
keggGeneName,
re.IGNORECASE,
):
similarity = calculateSimilarity(
str(featureName).strip(), str(keggGeneName).strip()
)
featureDict_v1[str(keggGeneName).strip()] = [
similarity,
kegg_id,
]
else:
for newMetName in newMetNames:
if re.search(
".*" + re.escape(newMetName) + ".*",
keggFeatureNames,
re.IGNORECASE,
):
featureDict_v1 = add2Dictionary(
featureName,
newMetName,
kegg_id,
keggFeatureNames,
featureDict_v1,
)
if featureDict_v1:
# Solving ties and Sort by similarity
sortedMetDict = sorted(featureDict_v1.values(), reverse=True)
maximum_v1 = sortedMetDict[0]
if len(featureDict_v1) > 1:
maximum_v2 = sortedMetDict[1]
if (maximum_v1[0] == maximum_v2[0]) or (
maximum_v1[0] - maximum_v2[0] <= 0.05 * maximum_v1[0]
):
# Warning Message
print(
"Warning! There is a tie with",
featureName + ":",
list(featureDict_v1.keys())[
list(featureDict_v1.values()).index(maximum_v1)
],
"selected,",
list(featureDict_v1.keys())[
list(featureDict_v1.values()).index(maximum_v2)
],
"rejected.",
)
is_tie = "Yes"
else:
is_tie = "No"
else:
is_tie = "No"
# UniqueID FeatureName featureType Matched KEGG_Name Kegg_cpd Similarity Tie elected
tmpList_v1.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\t" +
"Yes" +
"\t" +
str(
list(featureDict_v1.keys())[
list(featureDict_v1.values()).index(maximum_v1)
]
) +
"\t" +
str(maximum_v1[1]) +
"\t" +
str(round(maximum_v1[0], 2)) +
"\t" +
is_tie +
"\tYes\n")
featureDict_v2 = dict(featureDict_v1)
del featureDict_v2[
list(featureDict_v2.keys())[
list(featureDict_v2.values()).index(maximum_v1)
]
]
for keggName in featureDict_v2:
if is_tie == "Yes":
if (
keggName == list(featureDict_v1.keys())[
list(featureDict_v1.values()).index(maximum_v2)
]
):
tmpList_v1.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\tYes\t" +
str(keggName.strip()) +
"\t" +
str(featureDict_v2[keggName][1]) +
"\t" +
str(round(featureDict_v2[keggName][0], 2)) +
"\t" +
is_tie +
"\tNo\n"
)
else:
tmpList_v2.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\tYes\t" +
str(keggName.strip()) +
"\t" +
str(featureDict_v2[keggName][1]) +
"\t" +
str(round(featureDict_v2[keggName][0], 2)) +
"\tNo\tNo\n"
)
else:
tmpList_v2.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\tYes\t" +
str(keggName.strip()) +
"\t" +
str(featureDict_v2[keggName][1]) +
"\t" +
str(round(featureDict_v2[keggName][0], 2)) +
"\t" +
is_tie +
"\tNo\n")
else:
tmpList_v1.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\tNo\tNA\tNA\tNA\tNA\tNA\n"
)
# Writing results - first selected metabolites, last non-selected metabolites.
for line in tmpList_v1:
featureOut.write(line)
for line2 in tmpList_v2:
featureOut.write(line2)
return featureOut
def metaboliteModification(metabolite):
"""
Modify input metabolite name in order to find its kegg identifier. It will be modified
following this pipeline:
- Remove common metabolite prefixes (d-, alpha-, ...)
- Use long name if aminoacid abreviation provided (cys = cysteine, ...)
- Use long name if metabolite abreviation provided (orn = ornithine, ...)
- Change -ic acid ending by -ate
- Remove common chemical words that can worsen the matching (sulfoxide, ...)
- Use long name if lipid abreviation provided (pc = phosphatidylcholine, ...)
- If everything fails, use input metabolite name
Arguments:
:param metabolite: Input metabolite name
:type metabolite: string
Returns:
:return newMetNames: Modified metabolite names in a list.
:rtype newMetNames: list
"""
mainPrefixes = [
re.compile(r"^n?(-|\s)?[0-9]?(-|\s)?.*yl(-|\s)"),
re.compile(r"^(((l|d|dl|ld)|[0-9])(-|\s))?tert(-|\s)?"),
re.compile(r"^[0-9]?(-|\s)?hydroxy(-|\s)?"),
re.compile(r"^n?(-|\s)?[0-9]?(-|\s)?boc(-|\s)?"),
re.compile(r"\(.*\)"),
re.compile(r"^n?(-|\s)?[0-9]?(-|\s)?acetyl(-|\s)?"),
re.compile(
r"^((cis|trans)?(-|\s))?((s|r|\(s\)|\(r\))?(-|\s))?((([0-9]?)(,?))+?(-|\s))?(n(-|\s))?((d|l|dl|ld)(-|\s))?((alpha|beta|a|b)(-|\s))?(([0-9]?)(,?))+?(-|\s)?"
),
]
# Add as many chemical words as desired
chemWords = [
"sulfoxide",
"-sulfoxide",
"sulfate",
"-sulfate",
"sulfonate",
"-sulfonate",
]
# Commonly abbreviated words, like aminoacids
aminoacids = {
"l-cysteine": ["c", "cys"],
"l-aspartate": ["d", "asp"],
"l-serine": ["s", "ser"],
"l-glutamine": ["q", "gln"],
"l-lysine": ["k", "lys"],
"l-isoleucine": ["i", "ile"],
"l-proline": ["p", "pro"],
"l-threonine": ["t", "thr"],
"l-phenylalanine": ["f", "phe"],
"l-asparagine": ["n", "asn"],
"glycine": ["g", "gly"],
"l-histidine": ["h", "his"],
"l-leucine": ["l", "leu"],
"l-arginine": ["r", "arg"],
"l-tryptophan": ["w", "trp"],
"l-alanine": ["a", "ala"],
"l-valine": ["v", "val"],
"l-glutamate": ["e", "glu"],
"l-tyrosine": ["y", "tyr"],
"l-methionine": ["m", "met"],
}
abbreviations = {
"citrate": "cit",
"ornithine": "orn",
"thyroxine": "thyr",
"butoxycarbonyl": "boc",
}
lipids = {
"sphingomyelin": "sm",
"lysophosphatidylcholine": "lysopc",
"phosphatidylcholine": "pc",
"phosphatidylethanolamine": "pe",
"lysophosphatidylethanolamine": "lysope",
}
metabolite = metabolite.lower()
newMetNames = []
newMetNamesNoPrefix = []
# If metabolite is an abbreviation of an aminoacid
if (
(metabolite in dict(tuple(aminoacids.values())).keys()) or
(metabolite in dict(tuple(aminoacids.values())).values()) or
(metabolite in str(tuple(aminoacids.keys())))
):
for aminoacid in aminoacids.keys():
if (metabolite in aminoacids[aminoacid]) and (aminoacid not in newMetNames):
newMetNames.append(aminoacid)
elif (aminoacid.endswith(metabolite)) and (aminoacid not in newMetNames):
newMetNames.append(aminoacid)
# If metabolite is an abbreviation of another commonly abbreviated metabolites
if any(completeName in metabolite for completeName in abbreviations.values()):
for completeName in abbreviations.keys():
if abbreviations[completeName] in metabolite:
newMetNames.append(
re.sub(abbreviations[completeName], completeName, metabolite)
)
newMetNames.append(metabolite)
# If metabolite is an acid, use the -ate nomenclature
if (
("ic acid" in metabolite) or
("ic_acid" in metabolite) or
("icacid" in metabolite)
) and (re.sub(r"ic.?acid", "ate", metabolite) not in newMetNames):
newMetNames.append(re.sub(r"ic.?acid", "ate", metabolite))
newMetNames.append(re.sub(r"ic.?acid", "ic acid", metabolite))
# If metabolite contains any chemical word, remove it
if any(chemWord in metabolite for chemWord in chemWords):
for chemWord in chemWords:
if (chemWord in metabolite) and (
re.sub(chemWord, "", metabolite) not in newMetNames
):
newMetNames.append(re.sub(chemWord, "", metabolite))
newMetNames.append(metabolite)
# Search in pubchem for synonyms - skipped, but retained for the future
# if (pcp.get_synonyms(metabolite, 'name', 'compound')) and (not newMetNames):
# synonymsList = pcp.get_synonyms(metabolite, 'name', 'compound')
# for dictionarySynonyms in synonymsList:
# synonyms = dictionarySynonyms[u'Synonym']
# for synonym in synonyms:
# # similarity > 0.2 to increase the speed
# if (str(synonym) not in newMetNames) and
# (SequenceMatcher(a=metabolite, b=synonym).ratio() > 0.2):
# newMetNames.append(str(synonym))
# If metabolite is an abbreviation of a lipid
if any(lipid in metabolite for lipid in lipids.values()):
for lipid in lipids.keys():
if (metabolite.startswith(lipids[lipid])) and (lipid not in newMetNames):
newMetNames.append(lipid)
# If everything fails, take original name
if not newMetNames:
newMetNames.append(metabolite.strip())
# Check for prefixes
for newMetName in newMetNames:
for mainPrefix in mainPrefixes:
if (
mainPrefix.search(newMetName) and
(re.sub(mainPrefix, "", newMetName) not in newMetNamesNoPrefix) and
(newMetName not in aminoacids)
):
newMetNameNoPrefix = re.sub(mainPrefix, "", newMetName)
newMetNames.insert(len(newMetNames), newMetNameNoPrefix)
newMetNamesNoPrefix.append(newMetNameNoPrefix)
if newMetName not in newMetNamesNoPrefix:
newMetNamesNoPrefix.append(newMetName)
return newMetNamesNoPrefix
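# Illustrative behaviour (assumed, not exhaustive): an input such as "glutamic acid"
# yields candidate names including the "-ate" form ("glutamate") plus prefix-stripped
# variants, which are then matched against the KEGG compound list downstream.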
def add2Dictionary(metabolite, newMetName, kegg_cpd, keggMetNames, metDict):
"""
Compare input metabolite name with KEGG name.
If newMetName (metabolite without prefix) == keggMetName:
Calculate similarity between metabolite (oirginal name) and keggMetName
It creates different dictionaries for each metabolite (one per match)
Arguments:
:param metabolite: Input metabolite name
:type metabolite: string
:param newMetName: Input metabolite name without prefix and chemical words.
:type newMetName: string
:param kegg_cpd: Kegg identifier of the metabolite
:type kegg_cpd: string
:param keggMetNames: "List" of Kegg metabolite names associated to a Kegg identifier
:type keggMetNames: string
Returns:
:return metDict: Output dictionary with this structure: {Metabolite_Name_in_Kegg:
[similarity, Kegg_compound_identifier]
:rtype metDict: dictionary
"""
keggMetNames = keggMetNames.split(";")
for keggMetName in keggMetNames:
if re.search(
".*" + re.escape(newMetName) + ".*", keggMetName.strip(), re.IGNORECASE
):
similarity = calculateSimilarity(metabolite.strip(), keggMetName.strip())
metDict[keggMetName.strip()] = [similarity, kegg_cpd]
return metDict
def calculateSimilarity(featureName, keggName):
"""
Compare two feature (gene/metabolite) names and return the similarity between them. If the only
difference between the names is one of the mainPrefixes a similarity of 90% is returned.
Arguments:
:param metabolite: Input metabolite name
:type metabolite: string
:param keggName: Name to check the similarity with
:type keggName: string
Returns:
:return similarity: Percentage of similarity between 2 input names.
:rtype similarity: float
"""
mainPrefixes = [
"",
"cis-",
"trans-",
"d-",
"l-",
"(s)-",
"alpha-",
"beta-",
"alpha ",
"beta ",
"alpha-d-",
"beta-d-",
"alpha-l-",
"beta-l-",
"l-beta-",
"l-alpha-",
"d-beta-",
"d-alpha-",
]
if featureName == keggName:
similarity = 1.0
elif featureName.lower() == keggName.lower():
similarity = 0.9
elif (featureName.lower() in mainPrefixes) or (keggName.lower() in mainPrefixes):
similarity = SequenceMatcher(a=featureName.lower(), b=keggName.lower()).ratio()
elif keggName.lower().replace(featureName.lower(), "") in mainPrefixes:
similarity = 0.9
elif featureName.lower().replace(keggName.lower(), "") in mainPrefixes:
similarity = 0.9
else:
similarity = SequenceMatcher(a=featureName, b=keggName).ratio()
return similarity
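# e.g. calculateSimilarity("Glucose", "glucose") returns 0.9 (case-only difference),
# an exact match returns 1.0, and unrelated names fall back to the SequenceMatcher ratio.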
def downloadKeggInfo(args):
"""
Download necessary information from Kegg Database for parsing.
Arguments:
:param geneKeggAnnot: Gene to KEGG ID Link file
:type geneKeggAnnot: file
:param metKeggAnnot: Metabolite to KEGG ID Link file
:type metKeggAnnot: file
Returns:
:return gen2kegg: kegg_gene_identifier "\t" Gene_Symbol ";" Gene_name
:rtype gen2kegg: file
:return kgen2pathway: kegg_gene_identifier "\t" pathway_identifier_for_gene
:rtype kgen2pathway: file
:return met2kegg: kegg_metabolite_identifier "\t" Metabolite_names_list_sep_;
:rtype met2kegg: file
:return kmet2pathway: kegg_metabolite_identifier "\t" pathway_identifier_for_metabolite
        :rtype kmet2pathway: file
:return pathways: pathway_identifier_for_gene "\t" Pathway_name "-" Specified_organism
:rtype pathways: file
"""
# GeneKeggID2PathwayID
if args.geneKeggAnnot:
geneKeggAnnot = requests.get(
"http://rest.kegg.jp/link/" + args.species + "/pathway"
)
with open(args.kgen2pathways, 'w') as fh:
fh.write(geneKeggAnnot.content.decode("utf-8"))
# MetaboliteKeggID2PathwayID
if args.metKeggAnnot:
metKeggAnnot = requests.get(
"http://rest.kegg.jp/link/compound/pathway"
)
with open(args.kmet2pathways, 'w') as fh:
fh.write(metKeggAnnot.content.decode("utf-8"))
# PathwayID2PathwayNames
if args.pathways:
pathways_data = requests.get(
"http://rest.kegg.jp/list/pathway/" + args.species
)
with open(args.pathways, 'w') as fh:
fh.write(pathways_data.content.decode("utf-8"))
def keggAnnot2list(keggAnnotFile, UniqueID, featureName, featureKeggId, featureType):
"""
Create a dictionary that for the next function to find KEGG pathway information.
Use when the input dataset has been parsed with metabolite_parser tool.
Arguments:
:param keggAnnotFile: Output file from Add KEGG Annotation Info Tool
:type keggAnnotFile: file
:param UniqueID: Name of the column with the unique identifiers.
:type UniqueID: string
:param featureName: Name of the column with feature names.
:type featureName: string
:param featureKeggId: Name of the column with KEGG Identifiers.
:type featureKeggId: string
:param featureType: One of 'Gene' or 'Metabolite'
:type featureType: string
Returns:
        :return featureDict: Dictionary with this information: (uniqueID + "\t" + featureName) =
            kegg_id
:rtype featureDict: dictionary
:return featureList: List with the information of the features without kegg_id
:return featureList: list
"""
featureDict = {}
featureList = []
with open(keggAnnotFile, "r") as keggAnnot:
header = keggAnnot.readline()
header = header.strip().split("\t")
keggIdCol = header.index(featureKeggId)
uniqueIdCol = header.index(UniqueID)
featureNameCol = header.index(featureName)
for line in keggAnnot:
uniqueID = str(line.split("\t")[uniqueIdCol])
featureName = str(line.split("\t")[featureNameCol])
if "Selected" in header:
selectedCol = header.index("Selected")
selected = line.split("\t")[selectedCol].strip()
if selected == "Yes":
kegg_id = line.split("\t")[keggIdCol]
featureDict[uniqueID + "\t" + featureName] = kegg_id
# For those that does not have kegg_id
if selected == "NA":
featureList.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\t" +
"NA" +
"\t" +
"NA" +
"\t" +
"NA" +
"\n"
)
else:
kegg_id = line.split("\t")[keggIdCol]
if kegg_id:
featureDict[uniqueID + "\t" + featureName] = kegg_id
else:
featureList.append(
uniqueID +
"\t" +
featureName +
"\t" +
featureType +
"\t" +
"NA" +
"\t" +
"NA" +
"\t" +
"NA" + "\n"
)
return (featureDict, featureList)
def add_path_info(
featureDict,
featureList,
featureType,
keggId2pathway,
pathId2pathName,
species,
outputFile,
):
"""
Find all pathways in KEGG related to a gene or a metabolite.
Arguments:
:param featureDict: Dictionary with this information: (uniqueID + "\t" + featureName) =
[nameInKegg, kegg_id]
:type featureDict: dictionary
:param featureList: List with the information of the features without kegg_id
:type featureList: list
:param featureType: Type of the feature. One of 'Gene' or 'Metabolite'
:type featureType: string
:param keggId2pathway: Downloaded information from KEGG with the gene/metabolite KEGG Id
and the Pathway ID
:type keggId2pathway: file
:param pathId2pathName: Downloaded information from KEGG with the Pathway ID and the
Pathway Names
:type pathId2pathName: file
:param species: species identifier in kegg
:type species: string
:param outputFile: Output File Name to write the results.
:type outputFile: string
"""
output = open(outputFile, "w")
output.write(
"UniqueID\tFeature_Name\tFeature_Type\tKEGG_ID\tPathway_ID\tPathway_Name\n"
)
features = []
for inputFeature, feature in featureDict.items():
kegg_ids = [feature]
# Step 1) Obtain path_id
path_ids = parseWord(kegg_ids, keggId2pathway, 1, species)
# Step 2) Obtain path_name
paths = parseWord(path_ids, pathId2pathName, 0, species)
if paths:
for path in paths:
features.append(inputFeature + "\t" + featureType + "\t" + path + "\n")
features.extend(featureList)
for feature in sorted(features, key=natural_keys):
output.write(feature)
output.close()
def parseWord(toParseList, parser, nameIndex, species):
"""
Read a column from a KEGG file, find a given word and return a value from another column.
Use to parse genes and metabolites into kegg identifiers and find related pathways via names.
Arguments:
:param toParseList: Input words list to parse
:type toParseList: list
:param parser: Kegg downloaded file
:type parser: file
:param nameIndex: index value of the column where the input word is in the parser file
:type nameIndex: integer (0 or 1)
:param species: species identifier in kegg
:type species: string
Returns:
:return outputList: Input list with parsed value included. outputList_v2 if removing gene
symbol needed
:rtype outputList: list
"""
parser = open(parser, "r")
outputList = []
outputList_v2 = []
for toParseLine in toParseList:
toParseLineList = toParseLine.split("\t")
toParseWord = toParseLineList[len(toParseLineList) - 1]
parser.seek(0)
for parserLine in parser:
names2findIn = parserLine.split("\t")[nameIndex].strip()
if ";" in names2findIn:
names2findIn = names2findIn.split(";")[0].strip()
if findWholeWord(toParseWord)(names2findIn):
parsedName = parserLine.split("\t")[abs(nameIndex - 1)].strip()
# Remove " - species information" from pathway name
if " - " in parsedName:
pathName = parsedName.split(" - ")[0]
outputList.append(toParseLine + "\t" + pathName)
# Organism specific pathways
elif "map" in parsedName:
pathId = parsedName.replace("map", species)
outputList.append(toParseLine + "\t" + pathId)
else:
outputList.append(toParseLine + "\t" + parsedName)
if not outputList:
outputList.append(toParseLine + "\tNA")
# In cases of ensembl-gene_symbol-kegg_id-path_id-path_name
for outputLine in outputList:
if len(outputLine.split("\t")) == 4:
if outputLine.split("\t")[3] != "NA":
outputLineList = outputLine.split("\t")
del outputLineList[0]
outputList_v2.append("\t".join(outputLineList))
else:
outputList.remove(outputLine)
parser.close()
return outputList_v2 if outputList_v2 else outputList
def findWholeWord(word):
"""
Find one word inside another word.
Arguments:
:param word: word to check if is contained inside another word
:type word: string
Returns:
        :return: a compiled case-insensitive whole-word search; calling it on a string gives
            a match object when the word is contained, or None otherwise
        :rtype: callable
"""
return re.compile(
r"\b(?![-])({0})(?![-])\b".format(word), flags=re.IGNORECASE
).search
def atoi(text):
"""
Sort numbers in human readable order.
"""
return int(text) if text.isdigit() else text
def natural_keys(text):
"""
alist.sort(key=natural_keys) sorts in human readable order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
    return [atoi(c) for c in re.split(r"(\d+)", text)]
def fisherExactTest(args, path_feat):
"""
Perform a complete Pathway enrichment analysis:
1) Calculate all values of the contingency table
2) Perform a Fisher Exact test
3) Perform an FDR correction of p-values
*********************
* Fisher exact test *
*********************
| Molecules associated to pathway | Molecules not associated to pathway | Total
-----------------------------------------------------------------------------------------
DEG/M | a | b | a+b
-----------------------------------------------------------------------------------------
No DEG/M | c | d | c+d
-----------------------------------------------------------------------------------------
Total | a+c | b+d | N
-----------------------------------------------------------------------------------------
N = Total of molecules
a+b = Total 1 flags
c+d = Total 0 flags
Arguments:
:params deaGeneDataset deaMetDataset: Tables with Differential Expression Analysis
information for gene expression and metabolomics, respectively
:types deaGeneDataset met_dataset: files
:params gene_id_col, met_id_col, gene_flag_col, met_flag_col: Column names of unique
identifiers and desired flag column of gene expression and metabolomics datasets,
respectively
:type gene_id_col, met_id_col, gene_flag_col, met_flag_col: strings
:params alpha, method: alpha-value and method desired for the FDR correction
:type alpha, method: strings
Returns:
:return output: Table with this structure: Pathway Name Odds_Ratio P_value FDR_Correction
Flag_#
:rtype output: file
"""
# N_gene = sum(1 for line in open(args.deaGeneDataset)) - 1
# N_met = sum(1 for line in open(args.deaMetDataset)) - 1
# N = N_gene + N_met
# a+b, c+d)
    geneDeCounter = 0
    geneNonDeCounter = 0
    with open(args.deaGeneDataset, "r") as geneDataset:
        header_gene = geneDataset.readline().split("\t")
        indice_gene_flag = header_gene.index(args.gene_flag_col)
        for line in geneDataset:
            flag = int(line.split("\t")[indice_gene_flag])
            if flag == 1:
                geneDeCounter += 1
            elif flag == 0:
                geneNonDeCounter += 1
    metDeCounter = 0
    metNonDeCounter = 0
    with open(args.deaMetDataset, "r") as metDataset:
        header_met = metDataset.readline().split("\t")
        indice_met_flag = header_met.index(args.met_flag_col)
        for line in metDataset:
            flag = int(line.split("\t")[indice_met_flag])
            if flag == 1:
                metDeCounter += 1
            elif flag == 0:
                metNonDeCounter += 1
    a_b = geneDeCounter + metDeCounter
    c_d = geneNonDeCounter + metNonDeCounter
# a)
geneDataset = open(args.deaGeneDataset, "r")
metDataset = open(args.deaMetDataset, "r")
PEAList = []
pvalues = []
indice_gene_id = header_gene.index(args.gene_id_col)
indice_met_id = header_met.index(args.met_id_col)
for pathway in path_feat.keys():
a_gen = 0
a_met = 0
a_c = 0
for value in path_feat[pathway]:
geneDataset.seek(0)
geneDataset.readline()
for line in geneDataset:
gene = line.split("\t")[indice_gene_id].replace('"', "")
if value == gene:
a_gen += int(line.split("\t")[indice_gene_flag])
a_c += 1
break
metDataset.seek(0)
metDataset.readline()
for line_v2 in metDataset:
metabolite = line_v2.split("\t")[indice_met_id].replace('"', "")
if value == metabolite:
a_met += int(line_v2.split("\t")[indice_met_flag])
a_c += 1
break
a = a_gen + a_met
# b), c), d)
b = a_b - (a_gen + a_met)
c = a_c - (a_gen + a_met)
d = c_d - (a_c - (a_gen + a_met))
oddsratio, pvalue = st.fisher_exact([[a, b], [c, d]])
pvalues.append(pvalue)
PEAList.append(pathway + "\t" + str(oddsratio) + "\t" + str(pvalue))
geneDataset.close()
metDataset.close()
return PEAList
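# Note: the `pvalues` list above is collected but not returned here; the FDR correction
# promised in the docstring is presumably applied by the caller or in a part of the
# module not shown. A sketch using statsmodels (assumption, with args.alpha / args.method
# carrying the correction settings):
#   from statsmodels.stats.multitest import multipletests
#   reject, adj_pvalues, _, _ = multipletests(pvalues, alpha=float(args.alpha), method=args.method)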
##########################
# All vs All Correlation #
##########################
def Ids2Names(dataset, Id, annot, annotName):
"""
Change unique identifiers to feature names. It could be used for genes or for metabolites.
Arguments:
:param dataset: Wide dataset (Gene Expression/Metabolomics)
:type dataset: pandas dataframe
:param Id: Name of the column with Unique Identifiers.
:type Id: string
:param annot: Annotation File (For Gene Expression/Metabolomics). This file must contain at
least 2 columns.
:type annot: file
:param annotName: Name of the column of the Annotation File that contains the Feature Name
(Genes/Metabolites)
:type annotName: file
Returns:
:return new_dataset: Wide dataset with feature names (genes/metabolites) instead of unique
identifiers
:rtype new_dataset: pandas dataset
"""
annotTable = pd.read_table(annot, sep="\t", header=0)
if "Selected" in annotTable.columns:
annotTable = annotTable[annotTable["Selected"] == "Yes"]
annotTable = annotTable.drop_duplicates(subset=Id, keep="first")
for index, row in dataset.iterrows():
ID = str(row[Id])
name = str(annotTable.loc[annotTable[Id] == ID, annotName].item())
dataset.loc[index, Id] = ID + ": " + name
new_dataset = dataset.rename(columns={Id: annotName})
new_dataset = new_dataset.set_index(annotName)
return new_dataset
########
# sPLS #
########
def prepareSPLSData(args):
"""
Perform subsetting of the data for the sPLS tool.
Arguments:
:param geneDataset metDataset: Gene expression and Metabolomics wide dataset, respectively.
:type geneDataset metDataset: files
:param geneOption metOption: Options for subsetting Gene Expression and Metabolomics
datasets, respectively.
:type geneOption metOption: string
"""
args.geneDataset = pd.read_table(args.geneDataset, sep="\t", header=0)
    metTable = pd.read_table(args.metDataset, sep="\t", header=0)
"""
File: train_baseline
Description: This file trains a MLP on the wine dataset, with varied amounts of
labels available in order to establish a baseline for the model.
Author <NAME> <<EMAIL>>
License: Mit License
"""
from datetime import datetime
import numpy as np
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import os
import argparse
#seed the RNG
np.random.seed(123)
tf.random.set_seed(123)
#args = number of labels to train on
parser = argparse.ArgumentParser(description = "Training Arguments")
parser.add_argument("dataset",help="Options = wine,")
parser.add_argument("device",
help="options = [GPU:x,CPU:0]",
type=str)
parser.add_argument("n_labels",
help = "Number of labels to train on [4000,2000,1000,500,250]",
type = int)
parser.add_argument("batch_size",
help="batch size for training [64,128,256]",
type =int)
args = parser.parse_args()
#define the device
dev = "/"+args.device
# if GPU lock to single device:
# if dev != "/CPU:0":
# os.environ["CUDA_VISIBLE_DEVICES"]=dev[-1]
# breakpoint()
with tf.device(dev):
dname = args.dataset
n_labels = args.n_labels
batch_size = args.batch_size
#save losses to tensorboard
run_tag = "n"+str(n_labels)+"-b"+str(batch_size)
logdir = os.path.join("src","logs",dname,"baseline",run_tag)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
#initialisations for dataset
scaler = MinMaxScaler()
if dname =="wine":
path = os.path.join("datasets","wine","winequality-white.csv")
data = pd.read_csv(path,sep=';')
X = data.drop(columns='quality')
Y = data['quality']
#fit the scaler to X
scaler.fit(X)
#split into train and test sets
train_x,test_x,train_y,test_y = train_test_split(X,Y,
random_state = 0, stratify = Y,shuffle=True,
train_size=4000)
train = scaler.transform(train_x)
test = scaler.transform(test_x)
        train_y = pd.DataFrame.to_numpy(train_y)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 19:43:48 2020
@author: tommasobassignana
"""
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from sklearn import preprocessing
del(xml_file, xroot, xtree)
data = df
del(df)
def resample(data, freq):
"""
:param data: dataframe
:param freq: sampling frequency
:return: resampled data between the the first day at 00:00:00 and the last day at 23:60-freq:00 at freq sample frequency
"""
start = data.datetime.iloc[0].strftime('%Y-%m-%d') + " 00:00:00"
end = datetime.strptime(data.datetime.iloc[-1].strftime('%Y-%m-%d'), "%Y-%m-%d") + timedelta(days=1) - timedelta(
minutes=freq)
index = pd.period_range(start=start,
end=end,
freq=str(freq) + 'min').to_timestamp()
data = data.resample(str(freq) + 'min', on="datetime").agg({'glucose': np.mean, 'CHO': np.sum, "insulin": np.sum})
data = data.reindex(index=index)
data = data.reset_index()
data = data.rename(columns={"index": "datetime"})
return data
data_resampled = resample(data, 5)
#fill na's
data_resampled["glucose"].interpolate(method = "polynomial", order = 3, inplace = True)  # consider setting `limit`, otherwise negative glucose values can appear
#fill na's with zeros
for col in data_resampled.columns:
if "insulin" in col or "CHO" in col:
data_resampled[col] = data_resampled[col].fillna(0)
#train test split
n = len(data_resampled)
n
train_df = data_resampled[0:int(n*0.7)]
test_df = data_resampled[int(n*0.7):int(n*1)]
train_df.shape
test_df.shape
# feature scaling
x_cols = [x for x in train_df.columns if x != 'datetime']
min_max_scaler = preprocessing.MinMaxScaler()# 0 1 scale
train_df = min_max_scaler.fit_transform(train_df[x_cols])
#handles sparse data well, but re-read the documentation
#how to apply the same fitted transformation to the test set
#check if correct!
test_df = min_max_scaler.transform(test_df[x_cols])
#recreate the dataframe
train_df = pd.DataFrame(data=train_df, columns=x_cols)
test_df = pd.DataFrame(data=test_df, columns=x_cols)
train_df.shape
test_df.shape
#add datetime again?
train_df["datetime"] = pd.DatetimeIndex(data_resampled.iloc[:int(len(train_df["glucose"])), 0].values)
test_df["datetime"] = pd.DatetimeIndex(data_resampled.iloc[len(train_df["glucose"]):n, 0].values)
train_df = train_df[['datetime',"glucose", "CHO", "insulin"]]
test_df = test_df[['datetime',"glucose", "CHO", "insulin"]]
train_df.shape
test_df.shape
#to be removed
#train_df.dropna(inplace = True)
#test_df.dropna(inplace = True)
train_df.shape
test_df.shape
#test_df.to_csv("test_longitudinal", index = False)
def create_samples(data, ph, hist):
n_samples = data.shape[0] - ph - hist + 1
# number of rows
y = data.loc[ph + hist - 1:, "glucose"].values.reshape(-1, 1)
d = | pd.DatetimeIndex(data.loc[ph + hist - 1:, "datetime"].values) | pandas.DatetimeIndex |
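# A sketch of how such history/horizon sampling is commonly completed (an assumption,
# not the original implementation): build one flattened window of the last `hist`
# samples of every signal and pair it with the glucose value `ph` steps ahead.
def create_samples_sketch(data, ph, hist):
    values = data[["glucose", "CHO", "insulin"]].values
    X, y, d = [], [], []
    for i in range(hist, data.shape[0] - ph + 1):
        X.append(values[i - hist:i].flatten())        # `hist` past time steps, all signals
        y.append(data["glucose"].iloc[i + ph - 1])    # target glucose `ph` steps ahead
        d.append(data["datetime"].iloc[i + ph - 1])   # timestamp of the target
    return np.array(X), np.array(y).reshape(-1, 1), pd.DatetimeIndex(d)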
from __future__ import annotations
import threading
import time
import numpy as np
import pandas as pd
from aistac.components.abstract_component import AbstractComponent
from ds_discovery import EventBookPortfolio
from ds_discovery.components.commons import Commons
from ds_discovery.managers.controller_property_manager import ControllerPropertyManager
from ds_discovery.intent.controller_intent import ControllerIntentModel
__author__ = '<NAME>'
class Controller(AbstractComponent):
"""Controller Class for the management and overview of task components"""
DEFAULT_MODULE = 'ds_discovery.handlers.pandas_handlers'
DEFAULT_SOURCE_HANDLER = 'PandasSourceHandler'
DEFAULT_PERSIST_HANDLER = 'PandasPersistHandler'
REPORT_USE_CASE = 'use_case'
URI_PM_REPO = None
eb_portfolio: EventBookPortfolio
def __init__(self, property_manager: ControllerPropertyManager, intent_model: ControllerIntentModel,
default_save=None, reset_templates: bool=None, template_path: str=None, template_module: str=None,
template_source_handler: str=None, template_persist_handler: str=None,
align_connectors: bool=None):
""" Encapsulation class for the components set of classes
:param property_manager: The contract property manager instance for this component
:param intent_model: the model codebase containing the parameterizable intent
:param default_save: The default behaviour of persisting the contracts:
if False: The connector contracts are kept in memory (useful for restricted file systems)
:param reset_templates: (optional) reset connector templates from environ variables (see `report_environ()`)
:param template_path: (optional) a template path to use if the environment variable does not exist
:param template_module: (optional) a template module to use if the environment variable does not exist
:param template_source_handler: (optional) a template source handler to use if no environment variable
:param template_persist_handler: (optional) a template persist handler to use if no environment variable
:param align_connectors: (optional) resets aligned connectors to the template
"""
self.eb_portfolio = EventBookPortfolio.from_memory(has_contract=False)
super().__init__(property_manager=property_manager, intent_model=intent_model, default_save=default_save,
reset_templates=reset_templates, template_path=template_path, template_module=template_module,
template_source_handler=template_source_handler,
template_persist_handler=template_persist_handler, align_connectors=align_connectors)
@classmethod
def from_uri(cls, task_name: str, uri_pm_path: str, username: str, uri_pm_repo: str=None, pm_file_type: str=None,
pm_module: str=None, pm_handler: str=None, pm_kwargs: dict=None, default_save=None,
reset_templates: bool=None, template_path: str=None, template_module: str=None,
template_source_handler: str=None, template_persist_handler: str=None, align_connectors: bool=None,
default_save_intent: bool=None, default_intent_level: bool=None, order_next_available: bool=None,
default_replace_intent: bool=None, has_contract: bool=None) -> Controller:
""" Class Factory Method to instantiates the components application. The Factory Method handles the
instantiation of the Properties Manager, the Intent Model and the persistence of the uploaded properties.
See class inline docs for an example method
:param task_name: The reference name that uniquely identifies a task or subset of the property manager
:param uri_pm_path: A URI that identifies the resource path for the property manager.
:param username: A user name for this task activity.
:param uri_pm_repo: (optional) A repository URI to initially load the property manager but not save to.
:param pm_file_type: (optional) defines a specific file type for the property manager
:param pm_module: (optional) the module or package name where the handler can be found
:param pm_handler: (optional) the handler for retrieving the resource
:param pm_kwargs: (optional) a dictionary of kwargs to pass to the property manager
:param default_save: (optional) if the configuration should be persisted. default to 'True'
:param reset_templates: (optional) reset connector templates from environ variables. Default True
(see `report_environ()`)
:param template_path: (optional) a template path to use if the environment variable does not exist
:param template_module: (optional) a template module to use if the environment variable does not exist
:param template_source_handler: (optional) a template source handler to use if no environment variable
:param template_persist_handler: (optional) a template persist handler to use if no environment variable
:param align_connectors: (optional) resets aligned connectors to the template. default Default True
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param order_next_available: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
:param has_contract: (optional) indicates the instance should have a property manager domain contract
:return: the initialised class instance
"""
pm_file_type = pm_file_type if isinstance(pm_file_type, str) else 'json'
pm_module = pm_module if isinstance(pm_module, str) else cls.DEFAULT_MODULE
pm_handler = pm_handler if isinstance(pm_handler, str) else cls.DEFAULT_PERSIST_HANDLER
_pm = ControllerPropertyManager(task_name=task_name, username=username)
_intent_model = ControllerIntentModel(property_manager=_pm, default_save_intent=default_save_intent,
default_intent_level=default_intent_level,
order_next_available=order_next_available,
default_replace_intent=default_replace_intent)
super()._init_properties(property_manager=_pm, uri_pm_path=uri_pm_path, default_save=default_save,
uri_pm_repo=uri_pm_repo, pm_file_type=pm_file_type, pm_module=pm_module,
pm_handler=pm_handler, pm_kwargs=pm_kwargs, has_contract=has_contract)
return cls(property_manager=_pm, intent_model=_intent_model, default_save=default_save,
reset_templates=reset_templates, template_path=template_path, template_module=template_module,
template_source_handler=template_source_handler, template_persist_handler=template_persist_handler,
align_connectors=align_connectors)
@classmethod
def from_env(cls, task_name: str=None, default_save=None, reset_templates: bool=None, align_connectors: bool=None,
default_save_intent: bool=None, default_intent_level: bool=None, order_next_available: bool=None,
default_replace_intent: bool=None, uri_pm_repo: str=None, has_contract: bool=None,
**kwargs) -> Controller:
""" Class Factory Method that builds the connector handlers taking the property contract path from
the os.environ['HADRON_PM_PATH'] or, if not found, uses the system default,
for Linux and IOS '/tmp/components/contracts'
for Windows 'os.environ['AppData']\\components\\contracts'
The following environment variables can be set:
'HADRON_PM_PATH': the property contract path, if not found, uses the system default
'HADRON_PM_REPO': the property contract should be initially loaded from a read only repo site such as github
'HADRON_PM_TYPE': a file type for the property manager. If not found sets as 'json'
'HADRON_PM_MODULE': a default module package, if not set uses component default
'HADRON_PM_HANDLER': a default handler. if not set uses component default
This method calls to the Factory Method 'from_uri(...)' returning the initialised class instance
:param task_name: (optional) The reference name that uniquely identifies the ledger. Defaults to 'master'
:param default_save: (optional) if the configuration should be persisted
:param reset_templates: (optional) reset connector templates from environ variables. Default True
(see `report_environ()`)
:param align_connectors: (optional) resets aligned connectors to the template. default Default True
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param order_next_available: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
:param uri_pm_repo: The read only repo link that points to the raw data path to the contracts repo directory
:param has_contract: (optional) indicates the instance should have a property manager domain contract
:param kwargs: to pass to the property ConnectorContract as its kwargs
:return: the initialised class instance
"""
# save the controllers uri_pm_repo path
if isinstance(uri_pm_repo, str):
cls.URI_PM_REPO = uri_pm_repo
task_name = task_name if isinstance(task_name, str) else 'master'
return super().from_env(task_name=task_name, default_save=default_save, reset_templates=reset_templates,
align_connectors=align_connectors, default_save_intent=default_save_intent,
default_intent_level=default_intent_level, order_next_available=order_next_available,
default_replace_intent=default_replace_intent, uri_pm_repo=uri_pm_repo,
has_contract=has_contract, **kwargs)
@classmethod
def scratch_pad(cls) -> ControllerIntentModel:
""" A class method to use the Components intent methods as a scratch pad"""
return super().scratch_pad()
@property
def intent_model(self) -> ControllerIntentModel:
"""The intent model instance"""
return self._intent_model
@property
def register(self) -> ControllerIntentModel:
"""The intent model instance"""
return self._intent_model
@property
def pm(self) -> ControllerPropertyManager:
"""The properties manager instance"""
return self._component_pm
def remove_all_tasks(self, save: bool=None):
"""removes all tasks"""
for level in self.pm.get_intent():
self.pm.remove_intent(level=level)
self.pm_persist(save)
def set_use_case(self, title: str=None, domain: str=None, overview: str=None, scope: str=None,
situation: str=None, opportunity: str=None, actions: str=None, project_name: str=None,
project_lead: str=None, project_contact: str=None, stakeholder_domain: str=None,
stakeholder_group: str=None, stakeholder_lead: str=None, stakeholder_contact: str=None,
save: bool=None):
""" sets the use_case values. Only sets those passed
:param title: (optional) the title of the use_case
:param domain: (optional) the domain it sits within
:param overview: (optional) a overview of the use case
:param scope: (optional) the scope of responsibility
:param situation: (optional) The inferred 'Why', 'What' or 'How' and predicted 'therefore can we'
:param opportunity: (optional) The opportunity of the situation
:param actions: (optional) the actions to fulfil the opportunity
:param project_name: (optional) the name of the project this use case is for
:param project_lead: (optional) the person who is project lead
:param project_contact: (optional) the contact information for the project lead
:param stakeholder_domain: (optional) the domain of the stakeholders
:param stakeholder_group: (optional) the stakeholder group name
:param stakeholder_lead: (optional) the stakeholder lead
:param stakeholder_contact: (optional) contact information for the stakeholder lead
:param save: (optional) if True, save to file. Default is True
"""
self.pm.set_use_case(title=title, domain=domain, overview=overview, scope=scope, situation=situation,
opportunity=opportunity, actions=actions, project_name=project_name,
project_lead=project_lead, project_contact=project_contact,
stakeholder_domain=stakeholder_domain, stakeholder_group=stakeholder_group,
stakeholder_lead=stakeholder_lead, stakeholder_contact=stakeholder_contact)
self.pm_persist(save=save)
def reset_use_case(self, save: bool=None):
"""resets the use_case back to its default values"""
self.pm.reset_use_case()
self.pm_persist(save)
def report_use_case(self, as_dict: bool=None, stylise: bool=None):
""" a report on the use_case set as part of the domain contract
:param as_dict: (optional) if the result should be a dictionary. Default is False
:param stylise: (optional) if as_dict is False, if the return dataFrame should be stylised
:return:
"""
as_dict = as_dict if isinstance(as_dict, bool) else False
stylise = stylise if isinstance(stylise, bool) else True
report = self.pm.report_use_case()
if as_dict:
return report
report = pd.DataFrame(report, index=['values'])
report = report.transpose().reset_index()
report.columns = ['use_case', 'values']
if stylise:
return self._report(report, index_header='use_case')
return report
def report_tasks(self, stylise: bool=True):
""" generates a report for all the current component task
:param stylise: returns a stylised dataframe with formatting
:return: pd.Dataframe
"""
report = pd.DataFrame.from_dict(data=self.pm.report_intent())
intent_replace = {'transition': 'Transition', 'synthetic_builder': 'SyntheticBuilder', 'wrangle': 'Wrangle',
'feature_catalog': 'FeatureCatalog', 'data_tolerance': 'DataTolerance'}
report['component'] = report.intent.replace(to_replace=intent_replace)
report['task'] = [x[0][10:] for x in report['parameters']]
report['parameters'] = [x[1:] for x in report['parameters']]
report = report.loc[:, ['level', 'order', 'component', 'task', 'parameters', 'creator']]
if stylise:
return self._report(report, index_header='level')
return report
def report_run_book(self, stylise: bool=True):
""" generates a report on all the intent
:param stylise: returns a stylised dataframe with formatting
:return: pd.Dataframe
"""
report = pd.DataFrame(self.pm.report_run_book())
explode = report.explode(column='run_book', ignore_index=True)
canonical = explode.join( | pd.json_normalize(explode['run_book']) | pandas.json_normalize |
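# A short usage sketch based only on the methods documented above (an assumption,
# not part of the original module): build a Controller from environment defaults,
# record the use case and review it. Paths are resolved from HADRON_PM_PATH or the
# documented system default; the task name and use-case values are illustrative.
if __name__ == "__main__":
    controller = Controller.from_env(task_name="demo", has_contract=False)
    controller.set_use_case(title="Demo run", domain="retail", save=False)
    print(controller.report_use_case(stylise=False))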
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestGetNumericData:
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
obj = | DataFrame({"A": [1, "2", 3.0]}) | pandas.DataFrame |
import networkx as nx
import pandas as pd
import numpy as np
import copy
import time
# class for features
class Features:
def __init__(self, reducible_pairs, tree_set, str_features=None, root=2, distances=True, comb_measure=True):
if str_features is None:
self.str_features = ["cherry_height", "cherry_in_tree",
"leaf_distance", "red_after_pick",
"new_diff_cherries", "caterpillar",
"tree_height"]
else:
self.str_features = str_features
self.root = root
self.distances = distances
self.comb_measure = comb_measure
# initial attributes
self.tree_level_width_comb = {}
self.tree_level_width_dist = {}
self.data = | pd.DataFrame() | pandas.DataFrame |
# importing libraries
from tkinter import *
from tkinter import ttk, filedialog, messagebox
import pandas as pd
import random
from tkcalendar import *
from datetime import date
from captcha.image import ImageCaptcha
import pyttsx3
import pyaudio
import speech_recognition as sr
engine = pyttsx3.init("sapi5")
voices = engine.getProperty("voices")
engine.setProperty("voice", voices[0].id)
def speak(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
import speech_recognition as s_r
r = s_r.Recognizer()
my_mic = s_r.Microphone(device_index=1)
try:
with my_mic as source:
print("Recognizing...")
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
query = r.recognize_google(audio)
query = query.lower()
print(f"user said: {query}\n")
except Exception as e:
query = "I am jarvis"
return query
hiddenimports=["tkinter.ttk", "tkinter.font"]
login_root = Tk()
# creating a dictionary with login credentials
cred = {
"<EMAIL>":"Vishal",
"<EMAIL>":"Prasenjit",
"<EMAIL>":"Meyyappan",
"<EMAIL>":"Nidhi",
"<EMAIL>":"Kush",
"<EMAIL>":"Ojas",
"<EMAIL>": "ITI",
"<EMAIL>": "Kartik",
"<EMAIL>": "Aniket"
}
# geometry of the GUI
login_root.geometry("800x400")
login_root.title("Dashboard Login")
l=800
b=400
login_root.minsize(l, b)
login_root.maxsize(l, b)
# title and heading label
title_label=Label(text="Welcome, please enter your credentials", font = "cambria 15 bold", anchor="center")
title_label.pack(pady=30)
# adding info labels
email = Label(text = "Enter Your Email:", font = "cambria 12 bold").place(x=240,y=115)
# assigning the entry values to a string variable
emailvalue = StringVar()
captcha_sucessful = BooleanVar()
captcha_sucessful = False
# adding entry tabs
emailentry = Entry(login_root, text = emailvalue).place(x=400,y=115)
def captcha_login():
global captcha
if emailvalue.get() in cred:
try:
global rand
global data
global img
captcha = random.randint(100000, 999999)
rand=str(captcha)
image = ImageCaptcha(fonts=['DroidSansMono.ttf', 'DroidSansMono.ttf'])
image.write(rand, 'out.png')
global captcha_popup
global captchavalue
captchavalue = StringVar()
captchaentry = Entry(text = captchavalue).place(x=400,y=155)
Button(text = "Login",font = "cambria 12", command = captcha_validation, height=1, width=8).place(x=430,y=210)
Label(text = "Enter Your captcha:", font = "cambria 12 bold").place(x=240,y=155)
Label(text = "Please Enter Captcha And Click Login.", font = "cambria 12", anchor="center").place(x=250,y=360)
img = PhotoImage(file="out.png")
Label(image=img).place(x=230,y=210)
speak(f"Your captcha is {captcha}")
except:
print("Error, unable to create captcha")
else:
global login_not_registered
login_not_registered = Toplevel(login_root)
login_not_registered.geometry("500x100")
login_not_registered.title("User not Registered")
Label(login_not_registered, text = "The user is not Registered, please contact the developers", font = "cambria 12").pack()
Button(login_not_registered, text = "Ok", command = not_registered).pack()
speak("Please check your id and try again")
def not_registered():
login_not_registered.destroy()
def wrong_captcha():
captcha_error.destroy()
def no_captcha():
captcha_not_entered.destroy()
def captcha_validation():
global captcha_sucessful
if captchavalue.get() != "":
if int(captchavalue.get()) == int(captcha):
login_root.destroy()
captcha_sucessful = True
speak("Login Succesfull, Welcome to the Dashboard. Select the filters and I'll show you the desired data.")
else:
global captcha_error
captcha_error = Toplevel(login_root)
captcha_error.geometry("500x100")
captcha_error.title("Invalid captcha!")
Label(captcha_error, text = "Please check and enter the correct captcha again").pack()
Button(captcha_error, text = "OK", command = wrong_captcha).pack()
speak("You have entered a wrong captcha, please try again")
else:
global captcha_not_entered
captcha_not_entered = Toplevel(login_root)
captcha_not_entered.geometry("500x100")
captcha_not_entered.title("No captcha Entered!")
Label(captcha_not_entered, text = "Please enter the captcha").pack()
Button(captcha_not_entered, text = "OK", command = no_captcha).pack()
speak("please enter captcha")
# # adding the submit button
Button(text = "Get captcha",font = "cambria 12", command = captcha_login).place(x=250,y=210)
speak("Hi, please enter your credentials to proceed.")
# # finishing the first window
login_root.mainloop()
# excel data display window appears
data_root = Tk()
data_root.title("Dashboard Status")
data_root.geometry("800x400")
global statusvar
global sbar
# creating a status bar
statusvar = StringVar()
statusvar.set("Ready")
sbar = Label(data_root, textvariable=statusvar, relief=SUNKEN, anchor='e')
sbar.pack(side = BOTTOM, fill = X)
# creating a frame
treeframe = Frame(data_root)
treeframe.pack(expand=True, fill="both")
# creating a treeview
mytree = ttk.Treeview(treeframe)
# creating the scroll bar
# y axis scroll bar
treescrolly = Scrollbar(treeframe, orient = "vertical", command = mytree.yview)
# x axis scroll bar
treescrollx = Scrollbar(treeframe, orient = "horizontal", command = mytree.xview)
# assigning the scroll commands of treeview to the scrollbars we created
mytree.configure(xscrollcommand=treescrollx.set, yscrollcommand=treescrolly.set)
# filling and packing the scroll bars
treescrollx.pack(side = "bottom", fill = "x")
treescrolly.pack(side= "right", fill ="y")
# packing the treeview
mytree.pack(expand=True, fill="both")
# definging columns
column_name = ['Srno', 'Task No.', 'Application Name', 'BAU- Project', 'Package Type', 'Request Type', 'BOPO', 'Package Complexity', 'Start Date', 'End Date', 'H&M Billing Cycle', 'Standard SLA Working Days', 'SLA Measurement in Working Days', 'SLA Met?', 'Remarks']
# defining functions to open the excel file
def Load_excel_data():
mytree["column"] = column_name
mytree["show"] = "headings"
for column in mytree["columns"]:
mytree.heading(column, text = column)
# Convert the start date to datetime64
dataframe_hardcode['Start Date'] = pd.to_datetime(dataframe_hardcode['Start Date'], format='%d-%m-%Y')
dataframe_hardcode['End Date'] = pd.to_datetime(dataframe_hardcode['End Date'], format='%d-%m-%Y')
# function to format the column widhts
def tree_fromat():
mytree.column("#1", width = 35) # SrNo - 0
mytree.column("#2", width = 85) # task Number - 1
mytree.column("#3", width = 470) # application name - 2
mytree.column("#4", width = 95) # BAU/project - 3
mytree.column("#5", width = 90) # package type - 4
mytree.column("#6", width = 140) # request type - 5
mytree.column("#7", width = 55) # BOPO - 6
mytree.column("#8", width = 120) # Package complexity - 7
mytree.column("#9", width = 67) # start date - 8
mytree.column("#10", width = 67) # end date - 9
mytree.column("#11", width = 120) # H&M billing cycle - 10
mytree.column("#12", width = 85) # Standard SLA - 11
mytree.column("#13", width = 105) # SLA Measurement - 12
mytree.column("#14", width = 60) #SLA met - 13
mytree.column("#15", width = 0, stretch = NO) # remarks (removed) - 14
# -----------------------------------------------------------------------------------------------
def show_msi():
for row in data_hardcode:
if row[4] == "MSI":
mytree.insert("", "end", values = row)
def rem_msi():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[4] == "MSI":
mytree.delete(msi)
def show_appv():
for row in data_hardcode:
if row[4] == "APPV":
mytree.insert("", "end", values = row)
def rem_appv():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[4] == "APPV":
mytree.delete(msi)
def show_citrix():
for row in data_hardcode:
if row[4] == "Citrix MSI":
mytree.insert("", "end", values = row)
def rem_citrix():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[4] == "Citrix MSI":
mytree.delete(msi)
def show_normal():
for row in data_hardcode:
if row[5] == "Normal":
mytree.insert("", "end", values = row)
def show_express():
for row in data_hardcode:
if row[5] == "Express":
mytree.insert("", "end", values = row)
def show_super():
for row in data_hardcode:
if row[5] == "Super Express (BOPO)":
mytree.insert("", "end", values = row)
def rem_super():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[5] == "Super Express (BOPO)":
mytree.delete(msi)
def rem_express():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[5] == "Express":
mytree.delete(msi)
def rem_normal():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[5] == "Normal":
mytree.delete(msi)
def show_simple():
for row in data_hardcode:
if row[7] == 'Simple':
mytree.insert("", "end", values = row)
def show_medium():
for row in data_hardcode:
if row[7] == 'Medium':
mytree.insert("", "end", values = row)
def show_complex():
for row in data_hardcode:
if row[7] == 'Complex':
mytree.insert("", "end", values = row)
def rem_simple():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[7] == "Simple":
mytree.delete(msi)
def rem_medium():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[7] == "Medium":
mytree.delete(msi)
def rem_complex():
for msi in mytree.get_children():
values_set = mytree.item(msi)
dataset = values_set["values"]
if dataset[7] == "Complex":
mytree.delete(msi)
def reset_filter():
for row in mytree.get_children():
mytree.delete(row)
Load_excel_data()
tree_fromat()
statusvar.set('All Filters Have been Reset')
def rem_tree():
for row in mytree.get_children():
mytree.delete(row)
showappv_var = IntVar()
showmsi_var = IntVar()
showcitrix_var = IntVar()
shownormal_var = IntVar()
showexpress_var = IntVar()
showsuper_var = IntVar()
showsimple_var = IntVar()
showmedium_var = IntVar()
showcomplex_var = IntVar()
checks_var = [showappv_var, showmsi_var, showcitrix_var]
def filter_apply():
if str(len(mytree.get_children())) == "0":
if showmsi_var.get() == 1:
if showappv_var.get() == 0:
if showcitrix_var.get() == 0:
show_msi()
speak("Showing MSI Packages")
if showappv_var.get() == 1:
if showmsi_var.get() == 0:
if showcitrix_var.get() == 0:
show_appv()
speak("Showing AppV Packages")
if showcitrix_var.get() == 1:
if showmsi_var.get() == 0:
if showappv_var.get() == 0:
show_citrix()
speak("Showing Citrix Packages")
if showappv_var.get() == 1:
if showmsi_var.get() == 1:
if showcitrix_var.get() == 0:
show_msi()
show_appv()
speak("Showing MSI and AppV Packages")
if showcitrix_var.get() == 1:
if showmsi_var.get() == 1:
if showappv_var.get() == 0:
show_msi()
show_citrix()
speak("Showing MSI and Citrix Packages")
if showappv_var.get() == 1:
if showcitrix_var.get() == 1:
if showmsi_var.get() == 0:
show_appv()
show_citrix()
speak("Showing AppV and Citrix Packages")
if showappv_var.get() == 1:
if showmsi_var.get() == 1:
if showcitrix_var.get() == 1:
display_fulldata()
# ________________________________________
if str(len(mytree.get_children())) != "0":
if showmsi_var.get() == 1:
if showappv_var.get() == 0:
if showcitrix_var.get() == 0:
rem_appv()
rem_citrix()
speak("Showing MSI Packages")
if showappv_var.get() == 1:
if showmsi_var.get() == 0:
if showcitrix_var.get() == 0:
rem_msi()
rem_citrix()
speak("Showing AppV Packages")
if showcitrix_var.get() == 1:
if showappv_var.get() == 0:
if showmsi_var.get() == 0:
rem_appv()
rem_msi()
speak("Showing Citrix Packages")
if showmsi_var.get() == 1:
if showappv_var.get() == 1:
if showcitrix_var.get() == 0:
rem_citrix()
speak("Showing MSI and AppV Packages")
if showcitrix_var.get() == 1:
if showappv_var.get() == 1:
if showmsi_var.get() == 0:
rem_msi()
speak("Showing Citrix and AppV Packages")
if showmsi_var.get() == 1:
if showcitrix_var.get() == 1:
if showappv_var.get() == 0:
rem_appv()
speak("Showing MSI and Citrix Packages")
# ------------------------------------------------------------------------------------------------------
def deliveryfilter():
if str(len(mytree.get_children())) == "0":
if shownormal_var.get() == 1:
if showexpress_var.get() == 0:
if showsuper_var.get() == 0:
show_normal()
speak("Showing Normal Packages")
if showexpress_var.get() == 1:
if shownormal_var.get() == 0:
if showsuper_var.get() == 0:
show_express()
speak("Showing Express Packages")
if showsuper_var.get() == 1:
if showexpress_var.get() == 0:
if shownormal_var.get() == 0:
show_super()
speak("Showing Super Express Packages")
if showsuper_var.get() == 1:
if showexpress_var.get() == 1:
if shownormal_var.get() == 0:
show_super()
show_express()
speak("Showing express and Super Express Packages")
if shownormal_var.get() == 1:
if showexpress_var.get() == 1:
if showsuper_var.get() == 0:
show_normal()
show_express()
speak("Showing Normal and Express Packages")
if showsuper_var.get() == 1:
if shownormal_var.get() == 1:
if showexpress_var.get() == 0:
show_super()
show_normal()
speak("Showing Normal and Super Express Packages")
if showsuper_var.get() == 1:
if showexpress_var.get() == 1:
if shownormal_var.get() == 1:
for row in data_hardcode:
mytree.insert("", "end", values = row)
if str(len(mytree.get_children())) != "0":
if shownormal_var.get() == 1:
if showexpress_var.get() == 0:
if showsuper_var.get() == 0:
rem_express()
rem_super()
speak("Showing Normal Packages")
if showexpress_var.get() == 1:
if shownormal_var.get() == 0:
if showsuper_var.get() == 0:
rem_normal()
rem_super()
speak("Showing Express Packages")
if showsuper_var.get() == 1:
if showexpress_var.get() == 0:
if shownormal_var.get() == 0:
rem_express()
rem_normal()
speak("Showing Super Express Packages")
if showsuper_var.get() == 1:
if showexpress_var.get() == 1:
if shownormal_var.get() == 0:
rem_normal()
speak("Showing Express and Super Express Packages")
if shownormal_var.get() == 1:
if showexpress_var.get() == 1:
if showsuper_var.get() == 0:
rem_super()
speak("Showing Normal and Express Packages")
if showsuper_var.get() == 1:
if shownormal_var.get() == 1:
if showexpress_var.get() == 0:
rem_express()
speak("Showing Normal and Super Express Packages")
def complexityfilter():
global applied_complexity
if str(len(mytree.get_children())) == '0':
if showsimple_var.get() == 1:
if showmedium_var.get() == 0:
if showcomplex_var.get() == 0:
show_simple()
speak("Showing Simple Complexity Packages")
if showmedium_var.get() == 1:
if showsimple_var.get() == 0:
if showcomplex_var.get() == 0:
show_medium()
speak("Showing Medium Complexity Packages")
if showcomplex_var.get() == 1:
if showmedium_var.get() == 0:
if showsimple_var.get() == 0:
show_complex()
speak("Showing Complex Complexity Packages")
if showcomplex_var.get() == 1:
if showmedium_var.get() == 1:
if showsimple_var.get() == 0:
show_complex()
show_medium()
speak("Showing Medium and Complex Complexity Packages")
if showsimple_var.get() == 1:
if showmedium_var.get() == 1:
if showcomplex_var.get() == 0:
show_medium()
show_simple()
speak("Showing Simple and Medium Complexity Packages")
if showcomplex_var.get() == 1:
if showsimple_var.get() == 1:
if showmedium_var.get() == 0:
show_complex()
show_simple()
speak("Showing Simple and Complex Complexity Packages")
if showcomplex_var.get() == 1:
if showmedium_var.get() == 1:
if showsimple_var.get() == 1:
display_fulldata()
if str(len(mytree.get_children())) != '0':
if showsimple_var.get() == 1:
if showmedium_var.get() == 0:
if showcomplex_var.get() == 0:
rem_medium()
rem_complex()
speak("Showing Simple Complexity Packages")
if showcomplex_var.get() == 1:
if showmedium_var.get() == 0:
if showsimple_var.get() == 0:
rem_medium()
rem_simple()
speak("Showing Complex Complexity Packages")
if showmedium_var.get() == 1:
if showcomplex_var.get() == 0:
if showsimple_var.get() == 0:
rem_complex()
rem_simple()
speak("Showing Medium Complexity Packages")
if showcomplex_var.get() == 1:
if showmedium_var.get() == 1:
if showsimple_var.get() == 0:
rem_simple()
speak("Showing Medium and Complex Complexity Packages")
if showsimple_var.get() == 1:
if showmedium_var.get() == 1:
if showcomplex_var.get() == 0:
rem_complex()
speak("Showing Simple and Medium Complexity Packages")
if showcomplex_var.get() == 1:
if showsimple_var.get() == 1:
if showmedium_var.get() == 0:
rem_medium()
speak("Showing Simple and Complex Complexity Packages")
def statusbar_update():
complexityapplied = False
typepkgapplied = False
deliveryapplied = False
if showsimple_var.get() == 1 or showmedium_var.get() == 1 or showcomplex_var.get() == 1:
complexityapplied = True
if showmsi_var.get() == 1 or showappv_var.get() == 1 or showcitrix_var.get() == 1:
typepkgapplied = True
if shownormal_var.get() == 1 or showexpress_var.get() == 1 or showsuper_var.get() == 1:
deliveryapplied = True
if complexityapplied == True:
if typepkgapplied == True:
if deliveryapplied == True:
statusvar.set('Applied Package Type, Delivery Type and Complexity Type Filters')
if complexityapplied == True:
if typepkgapplied == True:
if deliveryapplied == False:
statusvar.set('Applied Package Type and Complexity Type Filters')
if complexityapplied == True:
if deliveryapplied == True:
if typepkgapplied == False:
statusvar.set('Applied Delivery Type and Complexity Type Filters')
if deliveryapplied == True:
if typepkgapplied == True:
if complexityapplied == False:
statusvar.set('Applied Package Type and Delivery Type Filters')
if deliveryapplied == True:
if typepkgapplied == False:
if complexityapplied == False:
statusvar.set('Applied Delivery Type Filters')
if deliveryapplied == False:
if typepkgapplied == True:
if complexityapplied == False:
statusvar.set('Applied Package Type Filters')
if deliveryapplied == False:
if typepkgapplied == False:
if complexityapplied == True:
statusvar.set('Applied Complexity Type Filters')
# ------------------------------------------------------------------------------------------------------
def filter_popup():
global filter_toplevel
filter_toplevel = Toplevel(data_root)
# filter_toplevel.geometry("600x400")
filter_toplevel.title("Select Filters")
showappv_box = Checkbutton(filter_toplevel, text = "Show APPV Packages", variable=showappv_var).pack(anchor= "w")
showmsi_box = Checkbutton(filter_toplevel, text = "Show MSI Packages", variable=showmsi_var).pack(anchor= "w")
showcitrix_box = Checkbutton(filter_toplevel, text = "Show Citrix MSI Packages", variable=showcitrix_var).pack(anchor= "w")
shownormal_box = Checkbutton(filter_toplevel, text = "Show Normal Request Packages", variable=shownormal_var).pack(anchor= "w")
showexpress_box = Checkbutton(filter_toplevel, text = "Show Express Request Packages", variable=showexpress_var).pack(anchor= "w")
showsuper_box = Checkbutton(filter_toplevel, text = "Show Super Express BOPO Packages", variable=showsuper_var).pack(anchor= "w")
showsimple_box = Checkbutton(filter_toplevel, text = "Show Simple Complexity Packages", variable=showsimple_var).pack(anchor= "w")
showsmed_box = Checkbutton(filter_toplevel, text = "Show Medium Complexity Packages", variable=showmedium_var).pack(anchor= "w")
showscopmlex_box = Checkbutton(filter_toplevel, text = "Show Complex Packages", variable=showcomplex_var).pack(anchor= "w")
Button(filter_toplevel, text = "Apply", command = filt_apply_final).pack(anchor= "w")
def filt_apply_final():
filter_apply()
deliveryfilter()
complexityfilter()
filter_toplevel.destroy()
statusbar_update()
def end_date_get():
global enddate
global filtered_df
statusvar.set('Removed Previous Filters, Displaying Data that Fits in the Date Range')
enddateset = cal.get_date().split("/")
enddate = (f"{enddateset[2]}-{enddateset[1]}-{enddateset[0]}")
datelabel2.config(text = f"End date is {cal.get_date()}")
cal_toplevel.destroy()
rem_tree()
# Filter data between the two selected dates
filtered_df = dataframe_hardcode.loc[(dataframe_hardcode['Start Date'] >= f'{startdate}')
& (dataframe_hardcode['End Date'] <= f'{enddate}')]
for row in data_hardcode:
for filteredrow in filtered_df.itertuples():
if filteredrow[1] == row[0]:
mytree.insert("", "end", values = row)
def date_get():
global datelabel2
global startdate
startdateset = cal.get_date().split("/")
startdate = (f"{startdateset[2]}-{startdateset[1]}-{startdateset[0]}")
startdate_button.destroy()
enddate_button.pack()
datelabel = Label(cal_toplevel, text = "")
datelabel.pack()
datelabel2 = Label(cal_toplevel, text= "")
datelabel2.pack()
datelabel.config(text = f"Starting date is {cal.get_date()}")
statusvar.set('Pick End Date')
def calendar_popup():
global cal
global cal_toplevel
global startdate_button
global enddate_button
cr_date = f"{date.today()}"
current_date_set = cr_date.split("-")
current_date = current_date_set[2]
current_month = current_date_set[1]
current_year = current_date_set[0]
cal_toplevel = Toplevel(data_root)
cal_toplevel.geometry("300x250")
cal_toplevel.minsize(300,250)
cal_toplevel.title("Pick a date")
cal = Calendar(cal_toplevel, selectmode = "day", year = int(current_year), month = int(current_month), date = int(current_date), date_pattern = "d/mm/yyyy", mindate = date(2021, 4, 14))
cal.pack()
startdate_button = Button(cal_toplevel, text = "Pick Starting Date", command = date_get)
enddate_button = Button(cal_toplevel, text = "Pick Ending Date", command = end_date_get)
startdate_button.pack()
statusvar.set('Pick a Date')
def display_fulldata():
for row in data_hardcode:
mytree.insert("", "end", values = row)
statusvar.set('Displaying Full Data')
speak("Showing Full Data")
def exporting():
global export_toplevel
global savefilename
savefilename = StringVar()
export_toplevel = Toplevel(data_root)
export_toplevel.geometry("300x125")
export_toplevel.minsize(150,125)
export_toplevel.title("Enter the file name")
Label(export_toplevel, text = 'Enter the FileName').pack()
savenameentry = Entry(export_toplevel, textvariable=savefilename)
savenameentry.pack()
Button(export_toplevel, text = 'Save', command= finalexport).pack()
speak("Preparing to export data")
def finalexport():
global exportdf
export_toplevel.destroy()
try:
statusvar.set('Exporting Data')
sbar.update()
import time
time.sleep(2)
exportdf = | pd.DataFrame(columns=['Srno', 'Task No.', 'Application Name', 'BAU- Project', 'Package Type', 'Request Type', 'BOPO', 'Package Complexity', 'Start Date', 'End Date', 'H&M Billing Cycle', 'Standard SLA Working Days', 'SLA Measurement in Working Days', 'SLA Met?', 'Remarks']) | pandas.DataFrame |
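# A sketch (assumed, not the original code) of how the rows currently shown in the
# treeview could be collected into the export DataFrame and written to disk; the
# file name comes from the `savefilename` entry captured above.
rows = [mytree.item(child)["values"] for child in mytree.get_children()]
exportdf = pd.DataFrame(rows, columns=column_name)
exportdf.to_csv(savefilename.get() + ".csv", index=False)
statusvar.set("Export complete")
sbar.update()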
import matplotlib
import matplotlib.pylab as plt
import os
from matplotlib.pyplot import legend, title
from numpy.core.defchararray import array
from numpy.lib.shape_base import column_stack
import seaborn as sns
import pandas as pd
import itertools
import numpy as np
def plot_graph(data, plot_name, figsize, legend):
"""
Plot the input data to the LaTeX-compatible .pgf format.
"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
| pd.set_option('display.width', None) | pandas.set_option |
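# A sketch (assumed) of the pgf export such a helper typically performs: draw the data
# with seaborn/matplotlib and save it as a LaTeX-compatible .pgf file. The helper name,
# the simple line plot and the renamed legend argument are illustrative choices, and
# writing .pgf output requires a working LaTeX toolchain.
def plot_graph_sketch(data, plot_name, figsize, legend_labels=None):
    fig, ax = plt.subplots(figsize=figsize)
    sns.lineplot(data=pd.DataFrame(data), ax=ax)   # one line per column
    if legend_labels:
        ax.legend(legend_labels)
    fig.savefig(plot_name + ".pgf", bbox_inches="tight")
    plt.close(fig)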
# coding: utf-8
# In[106]:
from flask import Flask
from flask import request
from flask import jsonify
import pprint
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import NearestNeighbors
app = Flask(__name__)
#create scaler instance
scaler=MinMaxScaler()
# In[3]:
#Import data
whiskey = pd.read_csv('content/data/whisky_subset_ml.csv')
# In[103]:
@app.route("/hello")
def hello():
name = request.args.get('name', '')
pref1 = request.args.get('pref1')
pref2 = request.args.get('pref2')
return "Hello " + name + ". You like: " + pref1 + " " + pref2
@app.route("/whiskme")
def whiskme_ws():
whiskey = request.args.get('whiskey', '')
pref1 = request.args.get('pref1')
pref2 = request.args.get('pref2')
whiskme_result = whiskme(whiskey, pref1, pref2)
pprint.pprint(whiskme_result)
#result = {
# 'input_whiskey': whiskey,
# 'pref1': pref1,
# 'pref2': pref2,
# 'result': str(whiskme_result)
#}
return jsonify(whiskme_result)
def whiskme(input_bottle,pref1,pref2,whiskey_db=whiskey,KNN=False):
#Create numeric df and drop unused fields, create a reference table for ID and distiller
whis=whiskey_db.drop(['Distillery','Postcode','Latitude','Longitude'],axis=1).iloc[:,1:].add(1)
w_ref=whiskey_db[['Distillery','RowID']]
input_idx=w_ref[w_ref['Distillery']==input_bottle].index
#Find consistency weights, grab indices
pr_idx=[w_ref[w_ref['Distillery']==pref1].index,w_ref[w_ref['Distillery']==pref2].index]
weight_temp=(whis.iloc[pr_idx[0],:].values-whis.iloc[pr_idx[1],:].values)
#Compute dispersion ('entropy') amongst preferences
weight=(weight_temp.reshape(12,))*10+1
#.abs().mul(10,axis=0).add(1,axis=0))
#Compute weighted input values
#broadcast values
#arr1=np.transpose(weight)
#S1=pd.Series(weight)
w_in_up=whis.mul(weight)
w_in_dn=whis.div(weight)
#Compute the new Preference match columns, individuals
temp=w_in_dn.iloc[pr_idx[0],:].sum(axis=1).values.reshape(1,)
temp2=pd.DataFrame(w_in_dn.sum(axis=1).values/temp).add(-1,axis=0).abs().add(.1,axis=0)
w_pref1_perc=temp2
w_pref1_perc.columns=['Pref1']
#Compute the new Preference match columns, individuals
temp1=w_in_dn.iloc[pr_idx[1],:].sum(axis=1).values.reshape(1,)
temp3=pd.DataFrame(w_in_dn.sum(axis=1).values/temp1).add(-1,axis=0).abs().add(.1,axis=0)
w_pref2_perc=temp3
w_pref2_perc.columns=['Pref2']
#Rescale the preference match cols
w_pref1_trans=pd.DataFrame(scaler.fit_transform(np.log(w_pref1_perc)), index=w_pref1_perc.index).add(-1,axis=0).abs()
w_pref2_trans=pd.DataFrame(scaler.fit_transform(np.log(w_pref2_perc)), index=w_pref2_perc.index).add(-1,axis=0).abs()
#Combine and avg the pref cols
#join new preference % to table
w_pref_avg=pd.DataFrame( | pd.concat([w_pref1_trans,w_pref2_trans],axis=1) | pandas.concat |
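# A sketch (an assumption, not the original logic) of how the averaged preference
# match could be turned into a recommendation inside whiskme: rank every bottle by
# the mean of the two rescaled preference columns, drop the user's own bottles and
# keep the best matches.
w_pref_avg.columns = ["Pref1", "Pref2"]
ranked = w_ref.copy()
ranked["match"] = w_pref_avg.mean(axis=1).values
ranked = ranked[~ranked["Distillery"].isin([input_bottle, pref1, pref2])]
top_matches = ranked.sort_values("match", ascending=False).head(5)
# whiskme would then return e.g. top_matches.to_dict(orient="records")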
'''
Module with auxiliary functions.
'''
from .xml_reader import read_xml
from gensim.parsing.preprocessing import strip_non_alphanum
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from gensim.matutils import Sparse2Corpus
from string import punctuation
from nltk.corpus import stopwords
import pandas as pd
from num2words import num2words
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def build_sentences_from_tokens(tokens):
""" Function used to rebuild the sentences from the tokens returned by the pipeline """
sentences = []
tmp_sentence = []
for elem in tokens:
if elem == "EOS":
tmp_sentence = ' '.join(tmp_sentence)
sentences.append(tmp_sentence)
tmp_sentence = []
else:
tmp_sentence.append(elem)
return sentences
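# Example: "EOS" tokens mark sentence boundaries, so a flat token stream folds back
# into sentences, e.g.
#   build_sentences_from_tokens(["the", "cat", "sat", "EOS", "it", "purred", "EOS"])
#   -> ["the cat sat", "it purred"]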
def write_data_to_file(data, filename):
""" Function used to debug the corpus state during preprocessing """
if isinstance(data, pd.DataFrame):
data_to_print = data.values.tolist()
else:
data_to_print = data
with open(filename, 'w') as f:
for item in data_to_print:
f.write("%s\n" % item)
def write_features_to_csv(pairs, features, filename):
""" Function used to write the features dataframe to a .csv file. """
ids = []
for pair in pairs:
ids.append(pair.id)
features_dataframe = | pd.DataFrame(features) | pandas.DataFrame |
import numpy as np
import pandas as pd
from numpy.random import default_rng
#///////////////////// miscellaneous functions starts here
def cAngle(i):
x=i % 360
return x
def weib(x,A,k): #A is the scale and k is the shape factor
return (k / A) * (x / A)**(k - 1) * np.exp(-(x / A)**k) #probability density: how likely a specific wind speed is to occur
def weib_cumulative(x,A,k): #A is the scale and k is the shape factor
return 1-np.exp(-1*(x/A)**k) #cumulative distribution: probability that the wind speed is at most x
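# Example: with A = 7 m/s and k = 2, the probability that the wind speed falls in the
# 5-6 m/s bin is the difference of the cumulative distribution at the bin edges (~0.12):
p_bin_5_to_6 = weib_cumulative(6, 7, 2) - weib_cumulative(5, 7, 2)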
#///////////////////// miscellaneous functions ends here
class environment:
"""
Creates the stand-alone environment and returns it with the given unique ID. By default, wind speeds from 0 m/s to 30 m/s in 0.5 m/s increments and wind directions from 0 to 359 degrees in 1-degree increments are added. A temperature of 25 degrees Celsius and a pressure of 101325 Pa are assumed. See the example below:
:param uniqueID: [*req*] the given unique ID.
:Example:
>>> Env = environment("C_Env")
>>> #Creates an environment without assigning it to any wind farm.
>>> print(Env.info.keys())
dict_keys(['Wind directions', 'Sectors', 'Wind speeds', 'Pressure', 'Air Density [kg/m^3]', 'Temperature', 'Wind probability', 'Scale parameter of wind distribution', 'Shape parameter of wind distribution'])
>>> print(Env.info['Wind directions']) #doctest:+ELLIPSIS
[0, 1, 2, 3, ...]
>>> print(Env.info['Wind speeds']) # doctest:+ELLIPSIS
[0.0, 0.5, 1.0, 1.5, 2.0, ...]
\\----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
created_environments=[]
def __init__(self, uniqueID):
if uniqueID in environment.created_environments: #Checks if the environment ID is already taken
raise Exception ("The environment unique ID [" + str(uniqueID) + "] is already taken.")
else:
if type(uniqueID) == str and len(uniqueID.split())==1:
if uniqueID in globals().keys(): #Checks if the given unique Id is not in conflict with user's already assigned variables.
raise Exception ("Another object with the same uniqe ID globally exists. New environment not created.")
else:
globals()[uniqueID] = self #environment is dynamicall created and referenced with the unique ID to the users assigned variable.
environment.created_environments.append(uniqueID) #we append the created environment to the list
self.uID=uniqueID
else:
raise Exception("Unique ID should be a string without spaces.")
self.__conditionsDic={}
self.__conditionsDic["Wind directions"]=[i for i in range(0,360)] #degrees
self.__conditionsDic["Sectors"] = None
self.__conditionsDic["Wind speeds"]=[i for i in np.arange(0,30.5,0.5)] #m/s
self.__conditionsDic["Pressure"]= 101325 #pascals
self.__conditionsDic["Air Density [kg/m^3]"]=1.225
self.__conditionsDic["Temperature"]=25 #celcius
self.__conditionsDic["Wind probability"]=[] #how often the wind blows in this sector
self.__conditionsDic["Scale parameter of wind distribution"]=[] # the scale parameter of the wind distribution in the particular sector in m/s
self.__conditionsDic["Shape parameter of wind distribution"]=[] # the shape parameter of the wind distribution in the particular sector
self.windSectors=None
@property
def info(self):
"""
Returns all the defined conditions of the environment.
:param None:
:Example:
>>> dantysk=windfarm("DanTysk")
>>> env=environment("D_Env")
>>> print(env.info.keys())
dict_keys(['Wind directions', 'Sectors', 'Wind speeds', 'Pressure', 'Air Density [kg/m^3]', 'Temperature', 'Wind probability', 'Scale parameter of wind distribution', 'Shape parameter of wind distribution'])
>>> print(env.info['Wind directions']) # doctest:+ELLIPSIS
[0, 1, 2, 3, ...]
>>> print(env.info['Wind speeds']) # doctest:+ELLIPSIS
[0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, ...]
\----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
return self.__conditionsDic
def windConditions(self,windProbability=[1/12 for i in range(12)], aParams=[7 for i in range(12)], kParams=[2.5 for i in range(12)]):
"""
Assigns the given per-sector wind conditions (occurrence probability and Weibull parameters) to the related environment.
The lists must contain one entry per sector, in the same order as the environment's sectors.
:param windProbability: [*opt*] the probability of wind presence in each sector, by default equal to 1/12.
:param aParams: [*opt*] the scale parameter of the Weibull distribution of the wind in each sector, by default 7 m/s.
:param kParams: [*opt*] the shape parameter of the Weibull distribution of the wind in each sector, by default 2.5.
:Example:
>>> from PyWinda import pywinda as pw
>>> Env=pw.environment("C_Env2")
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
#TODO check that the example demonstrates the purpose of windConditions.
self.__conditionsDic["Wind probability"]=windProbability
self.__conditionsDic["Scale parameter of wind distribution"]=aParams
self.__conditionsDic["Shape parameter of wind distribution"]=kParams
def makeSectors(self,n=12,sectorNames=["N_0","NNE_30","NEN_60","E_90","ESE_120","SSE_150","S_180","SSW_210","WSW_240","W_270","WNW_300","NNW_330"]):#by default the function will divide the sector in 12 regions
"""
Creates the given sectors to the related environment. Returns the result as a data frame.
Divides the 360 degrees to given number of sectors. By default it divides to 12 sectors and assigns the 12 standard names for every sector e.g. N_0 starts from 346 degrees and ends at 15 degrees.
:param n: [*opt*] the number of sectors.
:param sectorNames: [*opt*] names of the sectors given by user or default names for n=12.
:Example:
>>> Env=environment("C_Env2")
>>> print(Env.makeSectors())
N_0 NNE_30 NEN_60 E_90 ... WSW_240 W_270 WNW_300 NNW_330
0 346.0 16.0 46.0 76.0 ... 226.0 256.0 286.0 316.0
1 347.0 17.0 47.0 77.0 ... 227.0 257.0 287.0 317.0
2 348.0 18.0 48.0 78.0 ... 228.0 258.0 288.0 318.0
3 349.0 19.0 49.0 79.0 ... 229.0 259.0 289.0 319.0
4 350.0 20.0 50.0 80.0 ... 230.0 260.0 290.0 320.0
5 351.0 21.0 51.0 81.0 ... 231.0 261.0 291.0 321.0
6 352.0 22.0 52.0 82.0 ... 232.0 262.0 292.0 322.0
7 353.0 23.0 53.0 83.0 ... 233.0 263.0 293.0 323.0
8 354.0 24.0 54.0 84.0 ... 234.0 264.0 294.0 324.0
9 355.0 25.0 55.0 85.0 ... 235.0 265.0 295.0 325.0
10 356.0 26.0 56.0 86.0 ... 236.0 266.0 296.0 326.0
11 357.0 27.0 57.0 87.0 ... 237.0 267.0 297.0 327.0
12 358.0 28.0 58.0 88.0 ... 238.0 268.0 298.0 328.0
13 359.0 29.0 59.0 89.0 ... 239.0 269.0 299.0 329.0
14 0.0 30.0 60.0 90.0 ... 240.0 270.0 300.0 330.0
15 1.0 31.0 61.0 91.0 ... 241.0 271.0 301.0 331.0
16 2.0 32.0 62.0 92.0 ... 242.0 272.0 302.0 332.0
17 3.0 33.0 63.0 93.0 ... 243.0 273.0 303.0 333.0
18 4.0 34.0 64.0 94.0 ... 244.0 274.0 304.0 334.0
19 5.0 35.0 65.0 95.0 ... 245.0 275.0 305.0 335.0
20 6.0 36.0 66.0 96.0 ... 246.0 276.0 306.0 336.0
21 7.0 37.0 67.0 97.0 ... 247.0 277.0 307.0 337.0
22 8.0 38.0 68.0 98.0 ... 248.0 278.0 308.0 338.0
23 9.0 39.0 69.0 99.0 ... 249.0 279.0 309.0 339.0
24 10.0 40.0 70.0 100.0 ... 250.0 280.0 310.0 340.0
25 11.0 41.0 71.0 101.0 ... 251.0 281.0 311.0 341.0
26 12.0 42.0 72.0 102.0 ... 252.0 282.0 312.0 342.0
27 13.0 43.0 73.0 103.0 ... 253.0 283.0 313.0 343.0
28 14.0 44.0 74.0 104.0 ... 254.0 284.0 314.0 344.0
29 15.0 45.0 75.0 105.0 ... 255.0 285.0 315.0 345.0
<BLANKLINE>
[30 rows x 12 columns]
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
sectorSpan = 360 / n
eachS2E=[i for i in np.arange(1 - sectorSpan / 2, 360, sectorSpan)] #this makes a set of starts to end of each sector such that first sector starts from 0+1-sectorSpan / 2 goes to 360 (excluding 360) and the distance between consecutive units is equal to sectorSpan. The +1 makes sure that the sector starts and ends in the correct place. For example sector E_90 with n=12 starts from 90-30+1=61 and ends at 90+30=120
sectorsDic = {}
sectorNamesToReturn=sectorNames #this by default, of course user can give his/her own names as well.
if n!=12: #If the user gives an n other than 12, they can either provide sectorNames or leave it; if left, the script builds names automatically from the centre angle of each sector
if len(sectorNames)==12:
sectorNamesToReturn = [str(i) for i in np.arange(0,360,sectorSpan)]
elif len(sectorNames)!=12:
sectorNamesToReturn=sectorNames
if n == len(sectorNamesToReturn) and type(n) == int and n > 0: #this makes sure n is an integer and that the number of given sectors is equal to n if defined by user.
for i in range(n):
sectorsDic[sectorNamesToReturn[i]]=[cAngle(temp) for temp in np.arange(eachS2E[i],eachS2E[i+1],1)]
self.windSectors=sectorsDic
self.__conditionsDic["Sectors"]=sectorsDic
return pd.DataFrame(sectorsDic)
else:
raise Exception("Number of sectors and proposed number of names are not equal.")
def probabilityDistribution(self,aParams=[],kParams=[],probabs=[],avgWindSpeeds=[]):
if len(aParams)|len(kParams)|len(probabs)|len(avgWindSpeeds)!=len(self.windSectors):
raise Exception("Number of given parameters and existing number of sectors are not equal")
else:
pdDic={}
SectorNames=self.__conditionsDic["Sectors"].keys()
for index,i in enumerate(SectorNames):
pdDic[i]=[aParams[index],kParams[index],probabs[index],avgWindSpeeds[index]]
self.__conditionsDic["probabilityDistribution"]=pdDic
print(pdDic)
# self.__conditionsDic["ProbabilityDistributions"]=pdDic
# print(len(self.windSectors))
def test(self):
return self.uID
class windfarm:
"""
Creates wind farm object with the given unique ID. Pywinda will also create an internal shallow copy of the same windfarm object.
:param uniqueID: [*req*] Unique Id of the wind farm as a string.
:Example:
>>> from PyWinda import pywinda as pw
>>> curslack = pw.windfarm("Curslack_uID")
>>> print(pw.Curslack_uID==curslack)
True
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
created_windfarms=[]
def __init__(self,uniqueID,lifetime=25*365*24*3600):
if uniqueID in windfarm.created_windfarms: #Checks if the wind farm ID is already taken
raise Exception ("The wind farm unique ID [" + str(uniqueID) + "] is already taken.")
else:
if type(uniqueID) == str and len(uniqueID.split())==1:
if uniqueID in globals().keys(): #Checks if the given unique Id is not in conflict with user's already assigned variables.
raise Exception ("Another object with the same uniqe ID globally exists. New wind farm not created.")
else:
globals()[uniqueID] = self #wind farm is dynamicall created and referenced with the unique ID to the users assigned variable.
windfarm.created_windfarms.append(uniqueID) #we append the created wind farm to the list
self.uID=uniqueID
else:
raise Exception("Unique ID should be a string without spaces.")
self.createdSRTs=[] #This is the store dictionary. Stores the wind turbine reference names created in a particular wind farm
self.createdMRTs=[]
self.farmEnvironment=None #A wind farm will have only one environment
self.__numOfSRT=len(self.createdSRTs)
self.__numOfMRT=len(self.createdMRTs)
self.__allDistances=pd.DataFrame()
self.lifetime=lifetime #by default 25 years in seconds
@property #This helps to protect the info from direct changes by user
def info(self):
"""
Returns a data frame containing all the information about the wind farm.
:param None:
:Example:
>>> from PyWinda import pywinda as pw
>>> curslack=pw.windfarm("uID_Curslack")
>>> WT1=curslack.addTurbine('uID_WT1',turbineType='SRT',hubHeigt=120, x_horizontal=100,y_vertical=100)
>>> WT2=curslack.addTurbine('uID_WT2',turbineType='SRT',hubHeigt=120, x_horizontal=150,y_vertical=150)
>>> WT3=curslack.addTurbine('uID_MWT3',turbineType='MRT',hubHeigt=200, x_horizontal=300,y_vertical=300)
>>> print(curslack.info)
Property Value
0 Unique ID uID_Curslack
1 Created SRTs [uID_WT1, uID_WT2]
2 Created MRTs [uID_MWT3]
3 Number of SRTs 2
4 Number of MRTs 1
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
statistics={"Property":["Unique ID","Created SRTs", "Created MRTs","Number of SRTs","Number of MRTs"],
"Value":[self.uID,self.createdSRTs,self.createdMRTs,self.__numOfSRT,self.__numOfMRT]}
return pd.DataFrame(statistics)
@property
def assets(self):
"""
Returns the unique IDs of all the assets in the wind farm, e.g. single rotor turbines, multirotor turbines, met masts, etc.
:param None:
:Example:
>>> from PyWinda import pywinda as pw
>>> curslack=pw.windfarm("uID_Curslack2")
>>> WT1=curslack.addTurbine('uID_WT11',turbineType='SRT',hubHeigt=120)
>>> WT2=curslack.addTurbine('uID_WT12',turbineType='SRT',hubHeigt=120)
>>> WT3=curslack.addTurbine('uID_MWT13',turbineType='MRT',hubHeigt=200)
>>> print(curslack.assets)
['uID_WT11', 'uID_WT12', 'uID_MWT13']
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
self.allassets=self.createdSRTs+self.createdMRTs#keeps the record of all assets in the wind farm
return self.allassets
def addRefTurbine(self, uniqueID, reference='NREL'): ##This function helps to create a reference wind turbine and keep internal (inside the class) track of its name. It is not a deep copy, rather a reference.
"""
Adds a single rotor reference turbine (SRT) to the related wind farm. Returns the created wind turbine with the given unique ID.
The wind turbine is callable via its unique ID and via the variable assigned by the user. Note that the referenced unique ID is stored in the library, so when calling the turbine via its unique ID it should be prefixed by the library name pywinda. See the example below.
:param uniqueID: [*req*] Unique ID of the wind turbine as string
:param reference: [*opt*] Choose between the 'NREL' (5 MW) and 'DTU' (10 MW) reference turbines
:Example:
>>> from PyWinda import pywinda as pw
>>> DanTysk=pw.windfarm('DanTysk01')
>>> WT1=DanTysk.addRefTurbine('Turbine1',reference='NREL')
>>> print(pw.Turbine1.info)
Property Value
0 Unique Name Turbine1
1 x_horizontal NaN
2 y_vertical NaN
3 Diameter 120
4 Hub height 120
5 Area 11309.733553
6 Windspeeds [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, ...
7 CP [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.198, 0.313, 0...
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
if uniqueID in self.createdSRTs: # Checks if the given unique Id already exists in the wind farm
raise Exception("A wind turbine with the same unique ID in wind farm [ "+str(self.uID) +
" ] already exists. New turbine not added.")
else:
if type(uniqueID) == str and len(uniqueID.split()) == 1:
if uniqueID in globals().keys(): # Checks if the given unique Id is not in conflict with user's already assigned variables.
raise Exception("A wind turbine witht the same uniqe ID globally exists. New turbine not added.")
else:
if reference == "NREL":
NRELPower = [
0.19800,
0.31300,
0.37712,
0.41525,
0.44068,
0.45700,
0.46716,
0.47458,
0.47775,
0.47775,
0.47775,
0.47775,
0.47775,
0.47775,
0.47564,
0.47564,
0.47246,
0.46822,
0.45551,
0.40148,
0.35487,
0.31568,
0.28178,
0.25318,
0.22775,
0.20551,
0.18538,
0.16949,
0.15466,
0.14089,
0.12924,
0.11864,
0.10911,
0.10064,
0.09322,
0.08686,
]
ws = [3.0,
3.5,
4.0,
4.5,
5.0,
5.5,
6.0,
6.5,
7.0,
7.5,
8.0,
8.5,
9.0,
9.5,
10.0,
10.5,
11.0,
11.4,
11.5,
12.0,
12.5,
13.0,
13.5,
14.0,
14.5,
15.0,
15.5,
16.0,
16.5,
17.0,
17.5,
18.0,
18.5,
19.0,
19.5,
20.0,
]
globals()[uniqueID] = toUserVariable = SRT(uniqueID, diameter=126, hubHeigt=120,
ws=ws,
                                                                     cp=NRELPower) # turbine is dynamically created and registered under the unique ID for the user's assigned variable.
self.__numOfSRT += 1
self.createdSRTs.append(uniqueID)
elif reference == "DTU":
globals()[uniqueID] = toUserVariable = SRT(uniqueID, diameter=150, hubHeigt=150,
                                                                     ) # turbine is dynamically created and registered under the unique ID for the user's assigned variable.
self.__numOfSRT += 1
self.createdSRTs.append(uniqueID)
else:
raise Exception("Turbine type not supported")
else:
raise Exception(
"Name should be a string without spaces. The assignment should be done via the UID and not the variable name.")
return toUserVariable
def addTurbine(self,uniqueID,turbineType="SRT",diameter=float("NaN"),hubHeigt=float("NaN"),x_horizontal=float("NaN"),y_vertical=float("NaN"),ws=[],cp=[]): ##This function helps to create a wind turbine and keep internal (inside the class) track of its name. It is not a deep copy, rather a reference.
"""
By default adds a single rotor turbine (SRT) to the related windfarm. Returns the created wind turbine with the given unique ID.
        The wind turbine is callable via its unique ID as well as via the variable assigned by the user. Note that the unique ID is registered at library level, so when calling the turbine via its unique ID it should be prefixed with the library name pywinda. See example below.
:param uniqueID: [*req*] Unique ID of the wind turbine as string
:param turbineType: [*opt*] Type of turbine as string: 'SRT' or 'MRT'
:param diameter: [*opt*] Diameter of the turbine as float
:param hubHeigt: [*opt*] Hub height as a float
:param x_horizontal: [*opt*] Horizontal coordinate of the turbine as float
        :param y_vertical: [*opt*] Vertical coordinate of the turbine as float
:Example:
>>> from PyWinda import pywinda as pw
>>> curslack=pw.windfarm("uID_Curslack3")
>>> WT1=curslack.addTurbine('uID_WT14',turbineType='SRT',hubHeigt=120 )
>>> WT2=curslack.addTurbine('uID_WT15',turbineType='SRT',x_horizontal=150,y_vertical=150)
>>> WT3=curslack.addTurbine('uID_WT16',turbineType='MRT',hubHeigt=200,x_horizontal=300,y_vertical=300)
            >>> WT3.diameter=150 #Assigning WT3 diameter after creation.
>>> print(WT3==pw.uID_WT16)
True
>>> print(WT3.diameter)
150
>>> WT4=curslack.addTurbine('uID_WT16')
Traceback (most recent call last):
            Exception: A wind turbine with the same unique ID globally exists. New turbine not added.
>>> WT5=curslack.addTurbine('uID WT16')
Traceback (most recent call last):
Exception: Name should be a string without spaces. The assignment should be done via the UID and not the variable name.
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
if uniqueID in self.createdSRTs: #Checks if the given unique Id already exists in the wind farm
raise Exception ("A wind turbine with the same unique ID in wind farm [ "+str(self.uID)+" ] already exists. New turbine not added.")
else:
if type(uniqueID) == str and len(uniqueID.split())==1:
if uniqueID in globals().keys(): #Checks if the given unique Id is not in conflict with user's already assigned variables.
raise Exception("A wind turbine witht the same uniqe ID globally exists. New turbine not added.")
else:
if turbineType=="SRT":
globals()[uniqueID] = toUserVariable = SRT(uniqueID,diameter=diameter,hubHeigt=hubHeigt,x_horizontal=x_horizontal,y_vertical=y_vertical,ws=ws,cp=cp) # windfarm class is dynamicall created and referenced with the unique ID to the users assigned variable.
self.__numOfSRT += 1
self.createdSRTs.append(uniqueID)
elif turbineType=="MRT":
globals()[uniqueID] = toUserVariable = MRT(uniqueID,diameter=diameter,hubHeigt=hubHeigt,x_horizontal=x_horizontal,y_vertical=y_vertical,ws=ws,cp=cp) # windfarm class is dynamicall created and referenced with the unique ID to the users assigned variable.
self.__numOfMRT += 1
self.createdMRTs.append(uniqueID)
else:
raise Exception ("Turbine type not supported")
else:
raise Exception ("Name should be a string without spaces. The assignment should be done via the UID and not the variable name.")
return toUserVariable
def assignEnvironment(self,envName):
"""
Assigns an already created environment to the referenced wind farm. Parameters of the environment (e.g. temperature, pressure, wind regime etc.) can be assigned later.
        The environment is callable via its unique name as well as via the variable assigned by the user. When using the unique ID, it should be prefixed with the library name pywinda. See example.
        :param envName: [*req*] unique environment name
:Example:
>>> from PyWinda import pywinda as pw
>>> DanTysk=pw.windfarm("DanTysk2")
>>> env=pw.environment('normal1')
>>> print(env.info.keys()) #shows some of the conditions of the created environment
dict_keys(['Wind directions', 'Sectors', 'Wind speeds', 'Pressure', 'Temperature', 'Wind probability', 'Scale parameter of wind distribution', 'Shape parameter of wind distribution'])
>>> print(env.info['Pressure'])
101325
>>> DanTysk.assignEnvironment('normal1')
>>> DanTysk.assignEnvironment('normal2')
Traceback (most recent call last):
Exception: The wind farm [DanTysk2] already has assigned environment [normal1]. New environment not added.
>>> print(pw.normal1==env)
True
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
        if self.farmEnvironment!=None: #Checks if the wind farm already has an associated environment
raise Exception ("The wind farm ["+ str(self.uID)+"] already has assigned environment ["+str(self.farmEnvironment)+"]. New environment not added.")
else:
if type(envName) == str and len(envName.split())==1:
if envName in environment.created_environments: #Checks if the given unique Id is not in conflict with user's already assigned variables.
self.farmEnvironment=envName
else:
raise Exception ("Environment doesn't exist. Please make sure you first create the environment and then assign it.")
# globals()[envName] = toUserVariable = environment(envName) # environment is dynamicall created and referenced with the unique ID to the users assigned variable.
# self.farmEnvironment=envName
else:
raise Exception ("Name should be a string without spaces. The assignment should be done via the UID and not the variable name.")
        # return toUserVariable #doesn't return any value, deprecated
    def distances(self, assets=[]):#From this point on, properties shared by two turbines follow a "from"-"to" naming convention. For example distanceWT1toWT2 means the distance from WT1 to WT2.
"""
Returns the data frame with all the distances between assets in the wind farm or between those given in the assets list.
:param assets: [*opt*] Unique ID or object name of the assets
:Example:
>>> from PyWinda import pywinda as pw
>>> Curslack2 = pw.windfarm("Curslack_farm1")
>>> WT1 = Curslack2.addTurbine("C_WT01", x_horizontal=480331, y_vertical=4925387)
>>> WT2 = Curslack2.addTurbine("C_WT02", x_horizontal=480592, y_vertical=4925253)
>>> WT3 = Curslack2.addTurbine("C_WT03", x_horizontal=480886, y_vertical=4925166)
>>> WT4 = Curslack2.addTurbine("C_MWT04",x_horizontal=480573, y_vertical=4925712)
>>> print(Curslack2.distances())
Assets C_WT01 C_WT02 C_WT03 C_MWT04
0 C_WT01 0.000000 293.388821 597.382624 405.202419
1 C_WT02 293.388821 0.000000 306.602348 459.393078
2 C_WT03 597.382624 306.602348 0.000000 629.352842
3 C_MWT04 405.202419 459.393078 629.352842 0.000000
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
if len(assets)==0: #The user should give the set of turbines here, if not the function will calculate and return all the distances between all the turbines in that wind farm.
distancesDic={}
distancesDic["Assets"]=self.assets
for asset in self.assets:
distancesDic[asset] = []
for i in range(len(self.assets)):
deltax=globals()[asset].x_horizontal-globals()[self.assets[i]].x_horizontal
deltay=globals()[asset].y_vertical-globals()[self.assets[i]].y_vertical
distance=((deltax**2)+(deltay**2))**(0.5)
distancesDic[asset].append(distance)
df=pd.DataFrame(distancesDic)
return df
else: #This part will work for the user's given set of turbines manually
print("To be done for a given set of turbines' unique names")
return "Under development"
def coordinates(self, assets=[]):
"""
Returns the data frame with all assets' x and y coordinates if the assets list is empty, otherwise only for the given set of assets.
:param assets: [*opt*] Unique ID or object name of the assets
:Example:
>>> from PyWinda import pywinda as pw
>>> Curslack = pw.windfarm("Curslack_farm01")
>>> WT1 = Curslack.addTurbine("C_WT11", x_horizontal=480331, y_vertical=4925387)
>>> WT2 = Curslack.addTurbine("C_WT2", x_horizontal=480592, y_vertical=4925253)
>>> WT3 = Curslack.addTurbine("C_WT3", x_horizontal=480886, y_vertical=4925166)
>>> WT4 = Curslack.addTurbine("C_MWT4",x_horizontal=480573, y_vertical=4925712)
>>> print(Curslack.coordinates())
Assets x_coor y_coor
C_WT11 480331 4925387
C_WT2 480592 4925253
C_WT3 480886 4925166
C_MWT4 480573 4925712
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
if len(assets) == 0:
coordinatesDic={}
coordinatesDic["Assets"]=["x_coor","y_coor"]
for asset in self.assets:
coordinatesDic[asset]=[globals()[asset].x_horizontal,globals()[asset].y_vertical]
toReturn=pd.DataFrame(coordinatesDic)
return toReturn.set_index('Assets').transpose()
else:
print("To be done for a given set of turbines' unique names")
return "Under development"
def run(self,wakeModel=None,randomWind=None):
AEP = 0
RunReport = {}
if self.farmEnvironment==None:
            raise Exception ('The wind farm has no unique environment associated with it.')
if len(self.assets)==0:
raise Exception ('The wind farm has no turbine assigned to it.')
else:
            if wakeModel==None and randomWind==None: #no wake effects are considered, so power is calculated without needing information about the turbine locations
                unique_env=globals()[self.farmEnvironment].info #the environment of the wind farm; reading the info property here also updates the list of all assets.
if unique_env["Sectors"]!=None: #checks if the sectors are created
if len(unique_env["Sectors"])==len(unique_env["Wind probability"])==len(unique_env["Scale parameter of wind distribution"])==len(unique_env["Shape parameter of wind distribution"]): #checks if the wind conditions are declared correctly
for index,turbine in enumerate(self.allassets): #loops through all the available assets specifically only the SRTs and MRTs, pywinda currently doesn't support other assets like platforms and metmasts
statistics_dictionary = {}
sectors = []
directions = []
                            probab_any_wind = [] #defined for each sector
probab_specific_wind = [] #defined for every degree
total_probab = []
windspeeds = []
time_fraction = []
cp = []
energy = []
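                            # Note on the calculation below (explanatory comment added for clarity, not
                            # original code): each sector's probability is spread evenly over its 1-degree
                            # directions, and each 0.5 m/s wind-speed bin gets probability
                            # F(v+0.25) - F(v-0.25) from the sector's Weibull CDF. The energy of one bin is
                            #   E = 0.5 * rho * A * v**3 * Cp(v) * 8760 h * P(direction and bin).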
                            for ind,sectorname in enumerate(unique_env["Sectors"]): # loop over all the sectors
probability_per_unit=unique_env["Wind probability"][ind]/len(unique_env["Sectors"][sectorname]) #divides further the probability assigned for one sector to all the degrees inside a sector.
afactor_persector=unique_env["Scale parameter of wind distribution"][ind]
kfactor_persector=unique_env["Shape parameter of wind distribution"][ind]
for index,unit in enumerate(unique_env["Sectors"][sectorname]):
for indexx,windspeed in enumerate(unique_env['Wind speeds']):
# maxsize=len(unique_env['Wind speeds'])-1
# print(maxsize)
sectors.append(sectorname)
directions.append(unit) #unit is actually the degrees
probab_any_wind.append(probability_per_unit)
if indexx==0:
probabperws=weib_cumulative(unique_env['Wind speeds'][indexx]+0.25,afactor_persector,kfactor_persector)-weib_cumulative(0,afactor_persector,kfactor_persector)
probab_specific_wind.append(probabperws)
else:
probabperws=weib_cumulative(unique_env['Wind speeds'][indexx]+0.25,afactor_persector,kfactor_persector)-weib_cumulative(unique_env['Wind speeds'][indexx]-0.25,afactor_persector,kfactor_persector)
probab_specific_wind.append(probabperws)
total_probab_here = probability_per_unit*probabperws
total_probab.append(total_probab_here)
windspeeds.append(windspeed)
time_fraction.append(total_probab_here*365*24*3600)
cpLoop=globals()[str(turbine)].interp_cp[indexx]
cp.append(cpLoop)
energy.append(0.5*globals()[str(self.farmEnvironment)].info["Air Density [kg/m^3]"]*globals()[str(turbine)].area*windspeed**3*cpLoop*365*24*total_probab_here)
statistics_dictionary['Sectors'] = sectors
statistics_dictionary['Directions[degrees]']=directions
statistics_dictionary['Probability_of_any_windspeed']=probab_any_wind
statistics_dictionary['Probability_of_specific_windspeed']=probab_specific_wind
statistics_dictionary['Total_probability']=total_probab
statistics_dictionary['Wind_speeds[m/s]']=windspeeds
statistics_dictionary['Time_fraction[s]']=time_fraction
statistics_dictionary['CP']=cp
statistics_dictionary['Produced_energy[Wh]']=energy
print(np.sum(total_probab))
final_statistics = pd.DataFrame(statistics_dictionary)
RunReport[str(turbine)+'_statistics']=final_statistics
RunReport[str(turbine)+'_AEP[MWh]']=np.sum([energy])/1000000 #total AEP in MWh
AEP=AEP+np.sum([energy])/1000000
RunReport['Windfarm_AEP[MWh]']=AEP
return RunReport
else:
                        raise Exception ('The dimensions of the wind condition arrays do not match the number of introduced sectors')
else:
raise Exception('The environment sectors are not declared. Use the function makeSectors() to implement the PyWinda default sectors definition.')
            if wakeModel==None and randomWind!=None: #no wake effects are considered; power is calculated from a random Weibull wind-speed sample instead of the analytical distribution
rtg=default_rng()
                unique_env=globals()[self.farmEnvironment].info #the environment of the wind farm; reading the info property here also updates the list of all assets.
                bins_plan = [0] #0 is added at the beginning of the bins
for wsp in unique_env['Wind speeds']:
bins_plan.append(wsp + 0.25)
bins_plan[-1] = unique_env['Wind speeds'][-1] #end of the bins_plan array is replaced back with the end of the possible wind speeds, size of the bins_plan is one more than the wind speeds
if unique_env["Sectors"]!=None: #checks if the sectors are created
if len(unique_env["Sectors"])==len(unique_env["Wind probability"])==len(unique_env["Scale parameter of wind distribution"])==len(unique_env["Shape parameter of wind distribution"]): #checks if the wind conditions are declared correctly
for index,turbine in enumerate(self.allassets): #loops through all the available assets specifically only the SRTs and MRTs, pywinda currently doesn't support other assets like platforms and metmasts
statistics_dictionary = {}
sectors = []
directions = []
                            probab_any_wind = [] #defined for each sector
probab_specific_wind = [] #defined for every degree
total_probab = []
windspeeds = []
time_fraction = []
cp = []
energy = []
                            for ind,sectorname in enumerate(unique_env["Sectors"]): # loop over all the sectors
probability_per_unit=unique_env["Wind probability"][ind]/len(unique_env["Sectors"][sectorname]) #divides further the probability assigned for one sector to all the degrees inside a sector.
afactor_persector=unique_env["Scale parameter of wind distribution"][ind]
kfactor_persector=unique_env["Shape parameter of wind distribution"][ind]
                                randomWinds=afactor_persector*rtg.weibull(kfactor_persector,randomWind) #draws a large random sample of wind speeds from the sector's Weibull distribution
                                all_probabilities,bins_ignored=np.histogram(randomWinds,bins=bins_plan,density=True)#splits the sampled wind speeds into the bins
                                all_probabilities=all_probabilities*0.5 #each bin of the probability density function is 0.5 m/s wide, so the area under one bar (its probability) is calculated here
# print(np.sum(all_probabilities))
# print()
for index,unit in enumerate(unique_env["Sectors"][sectorname]):
for indexx,windspeed in enumerate(unique_env['Wind speeds']):
sectors.append(sectorname)
directions.append(unit) #unit is actually the degrees
probab_any_wind.append(probability_per_unit)
if indexx==0:
probabperws = all_probabilities[indexx]
probab_specific_wind.append(probabperws)
else:
probabperws = all_probabilities[indexx]
probab_specific_wind.append(probabperws)
total_probab_here = probability_per_unit*probabperws
total_probab.append(total_probab_here)
windspeeds.append(windspeed)
time_fraction.append(total_probab_here*365*24*3600)
cpLoop=globals()[str(turbine)].interp_cp[indexx]
cp.append(cpLoop)
energy.append(0.5*globals()[str(self.farmEnvironment)].info["Air Density [kg/m^3]"]*globals()[str(turbine)].area*windspeed**3*cpLoop*365*24*total_probab_here)
statistics_dictionary['Sectors'] = sectors
statistics_dictionary['Directions[degrees]']=directions
statistics_dictionary['Probability_of_any_windspeed']=probab_any_wind
statistics_dictionary['Probability_of_specific_windspeed']=probab_specific_wind
statistics_dictionary['Total_probability']=total_probab
statistics_dictionary['Wind_speeds[m/s]']=windspeeds
statistics_dictionary['Time_fraction[s]']=time_fraction
statistics_dictionary['CP']=cp
statistics_dictionary['Produced_energy[Wh]']=energy
# print(np.sum(total_probab))
final_statistics = | pd.DataFrame(statistics_dictionary) | pandas.DataFrame |
import pandas as pd
from pandas import HDFStore
import numpy as np
import subprocess
import io
import matplotlib.pyplot as plt
import gc
import os
from scipy.stats import ks_2samp
from functools import lru_cache
'''
Analyze wsprspots logs (prepared by WSPRLog2Pandas)
All manipulations are performed against an HDF5 store
with all reports in the "norm" dataset and the image
reports in the 'img' dataset.
'''
# utility functions
def absRangeMask(ser, min, max):
return (abs(ser) < min) | (abs(ser) > max)
def findrange(v):
for expd in range(-16,16):
for mant in [1, 2, 5]:
lim = mant * (10 ** expd)
#print("test v = %g lim = %g\n" % (v, lim))
if v < lim:
return lim
return v
# numeric filter functions
def identityFunc(v):
return v
def roundAZ(val, min, interval):
rv = roundInterval(val, min, interval)
rv[rv == 360] = 0
return rv
def roundInterval(val, min, interval):
'''
Round val to the nearest value that is min + N*interval
'''
Nf = (val - min)/interval
N = np.round(Nf)
return min + N * interval
def truncInterval(val, min, interval):
Nf = (val - min)/interval
N = np.trunc(Nf)
return min + N * interval
@lru_cache(maxsize=64)
def lamInterval(func, min, interval):
return lambda x : func(x, min, interval)
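# Illustrative check of the rounding helpers above (added example, not part of the
# original analysis code): roundInterval snaps values to the nearest min + N*interval
# grid point, and roundAZ additionally wraps 360 degrees back to 0.
def _rounding_examples():
    vals = np.array([7.3, 359.0])
    rounded = roundInterval(vals, 0, 0.5)    # -> array([  7.5, 359. ])
    az = roundAZ(np.array([359.0]), 0, 5)    # 359 rounds to 360, which wraps to 0
    return rounded, az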
class WSPRImg:
def __init__(self, store_name, file_list=None, exp_name='', use_hdf5_store=False):
# init these sets as empty -- they're used as filters
# in the get chunks method.
self.bad_rx_calls = set()
self.bad_txrx_pairs = set()
self.exp_name = exp_name;
if (file_list == None) or (use_hdf5_store and os.path.isfile(store_name)):
print("Using HDF5 Store")
self.store = pd.HDFStore(store_name, mode='r', complib='zlib', complevel=9)
print("Got here to build exc list")
self.buildExclusionLists()
else:
self.readFilesToHDF5(file_list, store_name)
self.store.info
def __del__(self):
self.store.close()
del self.bad_rx_calls
del self.bad_txrx_pairs
gc.collect()
def readFilesToHDF5(self, flist, store_name):
'''
Read a list of files into an HDF5 store.
        Read the input in chunks to keep storage to a minimum.
The store will be written to two datasets in the hdf5 file.
'norm' is the set of all reports, and 'img' is the set of all
image reports.
        On the way in, we create a new column "TXRX" with the concatenation
of the TXCALL and RXCALL values.
'''
col_types = {'RXSOL':float, 'TXSOL':float, 'MIDSOL':float,
'SPOT':int, 'DTIME':int, 'DIFFSNR':float,
'RXCALL':str, 'TXCALL':str, 'RXGRID':str, 'TXGRID':str,
'REFSNR':float, 'FREQ':float, 'POW':float, 'DRIFT':float,
'DIST':float, 'AZ':float, 'BAND':str, 'VER':str, 'CODE':int,
'FREQDIFF':float}
item_sizes = {'RXCALL': 12, 'TXCALL':12, 'RXGRID':8, 'TXGRID':8,
# 'VER': 20, # 'BAND': 20, # 'CODE': 20,
'TXRX':24}
col_names = list(col_types.keys())
col_names.append('TXRX')
csize = 1024 * 1024 # read 1M records at a time.
# create the datastore
#self.store = pd.HDFStore(store_name, complib='zlib', complevel=9, columns=col_names, format='table')
# we accumulate bad lists along the way, and delete suspect contacts
#self.store = pd.HDFStore(store_name, mode='w', complib='zlib', complevel=9, format='table')
#self.store = pd.HDFStore(store_name, mode='w', complib='blosc:lz4', complevel=9, format='table')
self.store = pd.HDFStore(store_name, mode='w', complib='bzip2', complevel=9, format='table')
rcount = 0
for fn in flist:
for chunk in pd.read_csv(fn, dtype=col_types, chunksize=csize):
chunk['TXRX'] = chunk['TXCALL'].str.cat(chunk['RXCALL'])
chunk = chunk.drop(['BAND','VER','CODE','SPOT'],axis=1)
# create the image frame
img_chunk = self.filterLineFreqs(self.getImageFrame(chunk))
# now accumulate the bad calls
self.buildExclusionListOTF(img_chunk)
for col in ['RXSOL','TXSOL','MIDSOL','DIFFSNR','REFSNR','POW','DRIFT','AZ','FREQDIFF']:
chunk[col] = chunk[col].astype(np.float32)
# remove them from the chunk
chunk2 = chunk[~(chunk.RXCALL.isin(self.bad_rx_calls) | chunk.TXRX.isin(self.bad_txrx_pairs))]
# save all reports in the norm table
self.store.append('norm', chunk2, data_columns=True, min_itemsize = item_sizes)
# save image reports in the image table
self.store.append('img', self.filterLineFreqs(self.getImageFrame(chunk2)),
data_columns=True, min_itemsize = item_sizes)
del chunk
del chunk2
del img_chunk
print("%d\n" % rcount, end='')
rcount = rcount + csize
gc.collect()
return
def getImageFrame(self, fr):
return fr[fr.FREQDIFF != 0]
def filterLineFreqs(self, fr):
# return an all true mask
msk = fr.FREQDIFF < 1e19
for min,max in ((58,62), (48,52)):
for mul in (1, 2, 3):
lmin = min * mul
lmax = max * mul
nmsk = absRangeMask(fr.FREQDIFF, lmin, lmax)
msk = msk & nmsk
return fr[msk]
def buildExclusionListOTF(self, chunk):
'''
Build the exclusion list as we read it. (on the fly)
'''
# build the series from the value counts in a chunk
# then turn the series into a set (s = set(ser.unique())
# isin can test against a set (!)
# merge the sets as s1.union(s2) (does not modify either set...) or s1 | s2
tmp_rx_counts = chunk['RXCALL'].value_counts()
tmp_txrx_counts = chunk['TXRX'].value_counts()
srx = set(tmp_rx_counts[tmp_rx_counts > 4].index.to_series().unique())
stxrx = set(tmp_txrx_counts[tmp_txrx_counts > 3].index.to_series().unique())
self.bad_rx_calls = self.bad_rx_calls.union(srx)
self.bad_txrx_pairs = self.bad_txrx_pairs.union(stxrx)
print("bad_rx_calls len = %d bad_tx_pairs len = %d\n" % (len(self.bad_rx_calls), len(self.bad_txrx_pairs)))
def buildExclusionLists(self):
'''
When we process blocks of records, we need to skip records that
may be suspect in origin. These include records from RX stations
that report too many image events, and TX/RX pairs that report more
than 5 events.
These lists are called bad_rx_calls and bad_txrx_pairs
'''
# iterate through all the records in blocks of 100K rows
# we only need to scan the image list.
tmp_rx_counts = pd.Series([])
tmp_txrx_counts = | pd.Series([]) | pandas.Series |
from src.utils import get_files, get_platform_selector, read_file_per_line
from os import path
import re
import pandas as pd
dirname = path.dirname(__file__)
results_dir = path.join(dirname, "../tests/boot/output")
output_dir = path.join(dirname, "extracted")
output_file = "boot_times.csv"
results = get_files(results_dir)
data = {
'metal': [],
'kvm': [],
'firecracker': [],
'docker': [],
'gvisor': []
}
for f in results:
selector = get_platform_selector(f)
arr = data[selector]
def parse_line(line: str) -> None:
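        # Expects lines from `time`'s output such as "real\t0m3.245s";
        # minutes and seconds are captured separately below.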
        m = re.search(r'real\t(\d+)m([0-9.]+)s', line)
if m:
minutes = int(m.group(1))
seconds = float(m.group(2))
arr.append(seconds + (minutes * 60))
read_file_per_line(path.join(results_dir, f), parse_line)
    assert len(arr) == 10, "Expected to collect 10 results but got " + str(len(arr))
def avg_time(values) -> float:
count = len(values)
return sum(values) / count
df = | pd.DataFrame(data=data) | pandas.DataFrame |
# Modified version for Erie County, New York
# Contact: <EMAIL>
from functools import reduce
from typing import Generator, Tuple, Dict, Any, Optional
import os
import pandas as pd
import streamlit as st
import numpy as np
import matplotlib
from bs4 import BeautifulSoup
import requests
import ipyvuetify as v
from traitlets import Unicode, List
from datetime import date, datetime, timedelta
import time
import altair as alt
from collections import namedtuple
from scipy.integrate import odeint
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Create S3 object to get the ENV variable from Heroku
#secret = os.environ['SECRET_KEY']
# Prompt the user for the secret
#password = st.text_input("Secret Handshake:", value="", type="password")
# If the secrete provided matches the ENV, proceeed with the app
#if password == secret:
# hide_menu_style = """
# <style>
#MainMenu {visibility: hidden;}
# </style>
# """
#st.markdown(hide_menu_style, unsafe_allow_html=True)
###########################
# Models and base functions
###########################
def sir(
s: float, i: float, r: float, beta: float, gamma: float, n: float
) -> Tuple[float, float, float]:
"""The SIR model, one time step."""
s_n = (-beta * s * i) + s
i_n = (beta * s * i - gamma * i) + i
r_n = gamma * i + r
if s_n < 0.0:
s_n = 0.0
if i_n < 0.0:
i_n = 0.0
if r_n < 0.0:
r_n = 0.0
scale = n / (s_n + i_n + r_n)
return s_n * scale, i_n * scale, r_n * scale
def gen_sir(
s: float, i: float, r: float, beta: float, gamma: float, n_days: int
) -> Generator[Tuple[float, float, float], None, None]:
"""Simulate SIR model forward in time yielding tuples."""
s, i, r = (float(v) for v in (s, i, r))
n = s + i + r
for _ in range(n_days + 1):
yield s, i, r
s, i, r = sir(s, i, r, beta, gamma, n)
def sim_sir(
s: float, i: float, r: float, beta: float, gamma: float, n_days: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, i, r = (float(v) for v in (s, i, r))
n = s + i + r
s_v, i_v, r_v = [s], [i], [r]
for day in range(n_days):
s, i, r = sir(s, i, r, beta, gamma, n)
s_v.append(s)
i_v.append(i)
r_v.append(r)
return (
np.array(s_v),
np.array(i_v),
np.array(r_v),
)
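# Illustrative example (not part of the original app): a small SIR run. Note that in
# this discrete formulation beta already includes the 1/N scaling, so a contact rate
# of 0.3/day in a population of 1000 is passed as 0.3/1000.
def _example_sir_run():
    s_v, i_v, r_v = sim_sir(s=999, i=1, r=0, beta=0.3 / 1000, gamma=0.1, n_days=30)
    return s_v[-1], i_v[-1], r_v[-1]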
def sim_sir_df(
p) -> pd.DataFrame:
"""Simulate the SIR model forward in time.
    p is a Parameters instance; for circular dependency reasons it can't be annotated.
"""
return pd.DataFrame(
data=gen_sir(S, total_infections, recovered, beta, gamma, n_days),
columns=("Susceptible", "Infected", "Recovered"),
)
def get_dispositions(
patient_state: np.ndarray, rates: Tuple[float, ...], regional_hosp_share: float = 1.0
) -> Tuple[np.ndarray, ...]:
"""Get dispositions of infected adjusted by rate and market_share."""
return (*(patient_state * rate * regional_hosp_share for rate in rates),)
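# Quick illustration (added example): each rate in `rates` is applied to the infected
# series and scaled by the regional market share.
def _example_dispositions():
    infected = np.array([100.0, 200.0])
    hosp, icu = get_dispositions(infected, rates=(0.05, 0.02), regional_hosp_share=0.5)
    return hosp, icu  # (array([2.5, 5. ]), array([1., 2.]))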
def build_admissions_df(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days + 1))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
counter = 0
for i in hosp_list:
projection[groups[0]+"_"+i] = projection.hosp*bed_share.iloc[3,counter]
projection[groups[1]+"_"+i] = projection.icu*bed_share.iloc[3,counter]
projection[groups[2]+"_"+i] = projection.vent*bed_share.iloc[3,counter]
counter +=1
if counter == 4: break
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
return projection_admits
def build_admissions_df_n(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
counter = 0
for i in hosp_list:
projection[groups[0]+"_"+i] = projection.hosp*bed_share.iloc[3,counter]
projection[groups[1]+"_"+i] = projection.icu*bed_share.iloc[3,counter]
projection[groups[2]+"_"+i] = projection.vent*bed_share.iloc[3,counter]
counter +=1
if counter == 4: break
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
return projection_admits
def build_prev_df_n(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
counter = 0
for i in hosp_list:
projection[groups[0]+"_"+i] = projection.hosp*bed_share.iloc[3,counter]
projection[groups[1]+"_"+i] = projection.icu*bed_share.iloc[3,counter]
projection[groups[2]+"_"+i] = projection.vent*bed_share.iloc[3,counter]
counter +=1
if counter == 4: break
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
return projection_admits
def build_census_df(
projection_admits: pd.DataFrame) -> pd.DataFrame:
"""ALOS for each category of COVID-19 case (total guesses)"""
n_days = np.shape(projection_admits)[0]
los_dict = {
"hosp": hosp_los, "icu": icu_los, "vent": vent_los}
census_dict = dict()
for k, los in los_dict.items():
census = (
projection_admits.cumsum().iloc[:-los, :]
- projection_admits.cumsum().shift(los).fillna(0)
).apply(np.ceil)
census_dict[k] = census[k]
census_df = pd.DataFrame(census_dict)
census_df["day"] = census_df.index
census_df = census_df[["day", "hosp", "icu", "vent"]]
census_df = census_df.head(n_days-10)
return census_df
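# Illustration of the census logic above (added example, not original code): the census
# is a running sum of admissions minus the same sum lagged by the length of stay, i.e.
# patients admitted more than `los` days ago are assumed to have left.
def _example_census():
    admits = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    los = 2
    census = admits.cumsum() - admits.cumsum().shift(los).fillna(0)
    return census  # 1, 3, 5, 7, 9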
def seir(
s: float, e: float, i: float, r: float, beta: float, gamma: float, alpha: float, n: float
) -> Tuple[float, float, float, float]:
"""The SIR model, one time step."""
s_n = (-beta * s * i) + s
e_n = (beta * s * i) - alpha * e + e
i_n = (alpha * e - gamma * i) + i
r_n = gamma * i + r
if s_n < 0.0:
s_n = 0.0
if e_n < 0.0:
e_n = 0.0
if i_n < 0.0:
i_n = 0.0
if r_n < 0.0:
r_n = 0.0
scale = n / (s_n + e_n+ i_n + r_n)
return s_n * scale, e_n * scale, i_n * scale, r_n * scale
def sim_seir(
s: float, e:float, i: float, r: float, beta: float, gamma: float, alpha: float, n_days: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r = (float(v) for v in (s, e, i, r))
n = s + e + i + r
s_v, e_v, i_v, r_v = [s], [e], [i], [r]
for day in range(n_days):
s, e, i, r = seir(s, e, i, r, beta, gamma, alpha, n)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
)
def gen_seir(
s: float, e: float, i: float, r: float, beta: float, gamma: float, alpha: float, n_days: int
) -> Generator[Tuple[float, float, float, float], None, None]:
"""Simulate SIR model forward in time yielding tuples."""
s, e, i, r = (float(v) for v in (s, e, i, r))
n = s + e + i + r
for _ in range(n_days + 1):
yield s, e, i, r
s, e, i, r = seir(s, e, i, r, beta, gamma, alpha, n)
# phase-adjusted https://www.nature.com/articles/s41421-020-0148-0
def sim_seir_decay(
s: float, e:float, i: float, r: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r = (float(v) for v in (s, e, i, r))
n = s + e + i + r
s_v, e_v, i_v, r_v = [s], [e], [i], [r]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=day<=int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=day<=step1_delta:
beta_decay=beta*(1-decay3)
else:
beta_decay=beta*(1-decay4)
s, e, i, r = seir(s, e, i, r, beta_decay, gamma, alpha, n)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
)
def seird(
s: float, e: float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n: float, fatal: float
) -> Tuple[float, float, float, float]:
"""The SIR model, one time step."""
s_n = (-beta * s * i) + s
e_n = (beta * s * i) - alpha * e + e
i_n = (alpha * e - gamma * i) + i
r_n = (1-fatal)*gamma * i + r
d_n = (fatal)*gamma * i +d
if s_n < 0.0:
s_n = 0.0
if e_n < 0.0:
e_n = 0.0
if i_n < 0.0:
i_n = 0.0
if r_n < 0.0:
r_n = 0.0
if d_n < 0.0:
d_n = 0.0
scale = n / (s_n + e_n+ i_n + r_n + d_n)
return s_n * scale, e_n * scale, i_n * scale, r_n * scale, d_n * scale
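# One illustrative step of the SEIRD update above (added example): the final rescaling
# keeps the total population S+E+I+R+D equal to n.
def _example_seird_step():
    s, e, i, r, d = seird(999.0, 0.0, 1.0, 0.0, 0.0,
                          beta=0.0003, gamma=0.1, alpha=0.2, n=1000.0, fatal=0.01)
    return s + e + i + r + d  # == 1000.0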
def sim_seird_decay(
s: float, e:float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r, d= (float(v) for v in (s, e, i, r, d))
n = s + e + i + r + d
s_v, e_v, i_v, r_v, d_v = [s], [e], [i], [r], [d]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=day<=int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=day<=step1_delta:
beta_decay=beta*(1-decay3)
else:
beta_decay=beta*(1-decay4)
s, e, i, r,d = seird(s, e, i, r, d, beta_decay, gamma, alpha, n, fatal)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
np.array(d_v)
)
# Model with high social distancing
def sim_seird_decay_social(
s: float, e:float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r, d= (float(v) for v in (s, e, i, r, d))
n = s + e + i + r + d
s_v, e_v, i_v, r_v, d_v = [s], [e], [i], [r], [d]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1) + (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.02)
elif int1_delta<=day<=int2_delta:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.52)
elif int2_delta<=day<=step1_delta:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.83)
else:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.73)
s, e, i, r,d = seird(s, e, i, r, d, beta_decay, gamma, alpha, n, fatal)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
np.array(d_v)
)
# Model with dynamic doubling time
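# (Explanatory note, added) In the branches below the intrinsic growth rate is
# g = 2**(1/Td) - 1 for an assumed doubling time Td of 1.61, 2.65, 5.32 and 9.70 days
# respectively, and beta is rebuilt from it as
# beta = (alpha + g) * (g + 1/infectious_period) / (alpha * S)
# before the social-distancing reduction is applied.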
def sim_seird_decay_erie(
s: float, e:float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r, d= (float(v) for v in (s, e, i, r, d))
n = s + e + i + r + d
s_v, e_v, i_v, r_v, d_v = [s], [e], [i], [r], [d]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta = (alpha+(2 ** (1 / 1.61) - 1))*((2 ** (1 / 1.61) - 1) + (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.3)
elif int1_delta<=day<=int2_delta:
beta = (alpha+(2 ** (1 / 2.65) - 1))*((2 ** (1 / 2.65) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.3)
elif int2_delta<=day<=step1_delta:
beta = (alpha+(2 ** (1 / 5.32) - 1))*((2 ** (1 / 5.32) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.5)
else:
beta = (alpha+(2 ** (1 / 9.70) - 1))*((2 ** (1 / 9.70) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.30)
s, e, i, r,d = seird(s, e, i, r, d, beta_decay, gamma, alpha, n, fatal)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
np.array(d_v)
)
def seijcrd(
s: float, e: float, i: float, j:float, c:float, r: float, d: float, beta: float, gamma: float, alpha: float, n: float, fatal_hosp: float, hosp_rate:float, icu_rate:float, icu_days:float,crit_lag:float, death_days:float
) -> Tuple[float, float, float, float]:
"""The SIR model, one time step."""
s_n = (-beta * s * (i+j+c)) + s
e_n = (beta * s * (i+j+c)) - alpha * e + e
i_n = (alpha * e - gamma * i) + i
j_n = hosp_rate * i * gamma + (1-icu_rate)* c *icu_days + j
c_n = icu_rate * j * (1/crit_lag) - c * (1/death_days)
r_n = (1-hosp_rate)*gamma * i + (1-icu_rate) * (1/crit_lag)* j + r
d_n = (fatal_hosp)* c * (1/crit_lag)+d
if s_n < 0.0:
s_n = 0.0
if e_n < 0.0:
e_n = 0.0
if i_n < 0.0:
i_n = 0.0
if j_n < 0.0:
j_n = 0.0
if c_n < 0.0:
c_n = 0.0
if r_n < 0.0:
r_n = 0.0
if d_n < 0.0:
d_n = 0.0
scale = n / (s_n + e_n+ i_n + j_n+ c_n+ r_n + d_n)
return s_n * scale, e_n * scale, i_n * scale, j_n* scale, c_n*scale, r_n * scale, d_n * scale
def sim_seijcrd_decay(
s: float, e:float, i: float, j:float, c: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal_hosp: float, hosp_rate: float, icu_rate: float, icu_days:float, crit_lag: float, death_days:float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, j, c, r, d= (float(v) for v in (s, e, i, c, j, r, d))
n = s + e + i + j+r + d
s_v, e_v, i_v, j_v, c_v, r_v, d_v = [s], [e], [i], [j], [c], [r], [d]
for day in range(n_days):
if 0<=day<=21:
beta = (alpha+(2 ** (1 / 1.61) - 1))*((2 ** (1 / 1.61) - 1) + (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay1)
elif 22<=day<=28:
beta = (alpha+(2 ** (1 / 2.65) - 1))*((2 ** (1 / 2.65) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay2)
elif 29<=day<=step1_delta:
beta = (alpha+(2 ** (1 / 5.32) - 1))*((2 ** (1 / 5.32) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay3)
else:
beta = (alpha+(2 ** (1 / 9.70) - 1))*((2 ** (1 / 9.70) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay4)
s, e, i,j, c, r,d = seijcrd(s, e, i,j, c, r, d, beta_decay, gamma, alpha, n, fatal_hosp, hosp_rate, icu_rate, icu_days, crit_lag, death_days)
s_v.append(s)
e_v.append(e)
i_v.append(i)
j_v.append(j)
c_v.append(c)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(j_v),
np.array(c_v),
np.array(r_v),
np.array(d_v)
)
def betanew(t,beta):
if start_day<= t <= int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=t<int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=t<int3_delta:
beta_decay=beta*(1-decay3)
elif int3_delta<=t<=step1_delta:
beta_decay=beta*(1-decay4)
elif step1_delta<=t<=step2_delta:
beta_decay=beta*(1-decay5)
else:
beta_decay=beta*(1-decay6)
return beta_decay
#The SIR model differential equations with ODE solver.
def derivdecay(y, t, N, beta, gamma1, gamma2, alpha, p, hosp, q, l, n_days, decay1, decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta, fatal_hosp ):
S, E, A, I,J, R,D,counter = y
dSdt = - betanew(t, beta) * S * (q*I + l*J + A)/N
dEdt = betanew(t, beta) * S * (q*I + l*J + A)/N - alpha * E
dAdt = alpha * E*(1-p)-gamma1*A
dIdt = p* alpha* E - gamma1 * I- hosp*I
dJdt = hosp * I -gamma2*J
dRdt = (1-fatal_hosp)*gamma2 * J + gamma1*(A+I)
dDdt = fatal_hosp * gamma2 * J
counter = (1-fatal_hosp)*gamma2 * J
return dSdt, dEdt,dAdt, dIdt, dJdt, dRdt, dDdt, counter
def sim_seaijrd_decay_ode(
s, e,a,i, j,r, d, beta, gamma1, gamma2, alpha, n_days, decay1, decay2,decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, p, hosp, q,
l):
n = s + e + a + i + j+ r + d
rh=0
y0= s,e,a,i,j,r,d, rh
t=np.arange(0, n_days, step=1)
ret = odeint(derivdecay, y0, t, args=(n, beta, gamma1, gamma2, alpha, p, hosp,q,l, n_days, decay1, decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta, fatal_hosp))
S_n, E_n,A_n, I_n,J_n, R_n, D_n ,RH_n= ret.T
return (S_n, E_n,A_n, I_n,J_n, R_n, D_n, RH_n)
####The SIR model differential equations with ODE solver. Presymptomatic and masks
def betanew2(t,beta,x,p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8):
if start_day<= t <= int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=t<int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=t<int3_delta:
beta_decay=beta*(1-decay3)
elif int3_delta<=t<=step1_delta:
beta_decay=beta*(1-decay4)*(1-(x*p_m1))**2
elif step1_delta<=t<=step2_delta:
beta_decay=beta*(1-decay5)*(1-(x*p_m2))**2
elif step2_delta<=t<=step3_delta:
beta_decay=beta*(1-decay6)*(1-(x*p_m3))**2
elif step3_delta<=t<=step4_delta:
beta_decay=beta*(1-decay7)*(1-(x*p_m4))**2
elif step4_delta<=t<=step5_delta:
beta_decay=beta*(1-decay8)*(1-(x*p_m5))**2
elif step5_delta<=t<=step6_delta:
beta_decay=beta*(1-decay9)*(1-(x*p_m6))**2
elif step6_delta<=t<=step7_delta:
beta_decay=beta*(1-decay10)*(1-(x*p_m7))**2
else:
beta_decay=beta*(1-decay11)*(1-(x*p_m8))**2
return beta_decay
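# (Added note) The (1 - x*p_m)**2 factor above presumably models masks of efficacy x
# worn by a fraction p_m of people on both sides of an interaction; this reading is an
# assumption and is not stated in the original code.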
def derivdecayP(y, t, beta, gamma1, gamma2, alpha, sym, hosp,q,l,n_days, decay1,decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8, delta_p ):
S, E, P,A, I,J, R,D,counter = y
N=S+E+P+A+I+J+R+D
dSdt = - betanew2(t, beta, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8) * S * (q*I + l*J +P+ A)/N
dEdt = betanew2(t, beta, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8) * S * (q*I + l*J +P+ A)/N - alpha * E
dPdt = alpha * E - delta_p * P
dAdt = delta_p* P *(1-sym)-gamma1*A
dIdt = sym* delta_p* P - gamma1 * I- hosp*I
dJdt = hosp * I -gamma2*J
dRdt = (1-fatal_hosp)*gamma2 * J + gamma1*(A+I)
dDdt = fatal_hosp * gamma2 * J
counter = (1-fatal_hosp)*gamma2 * J
return dSdt, dEdt,dPdt,dAdt, dIdt, dJdt, dRdt, dDdt, counter
def sim_sepaijrd_decay_ode(
s, e,p,a,i, j,r, d, beta, gamma1, gamma2, alpha, n_days,decay1,decay2,decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, sym, hosp, q,
l,x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8, delta_p):
n = s + e + p+a + i + j+ r + d
rh=0
y0= s,e,p,a,i,j,r,d, rh
t=np.arange(0, n_days, step=1)
ret = odeint(derivdecayP, y0, t, args=(beta, gamma1, gamma2, alpha, sym, hosp,q,l, n_days, decay1, decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, x,
p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8, delta_p))
S_n, E_n,P_n,A_n, I_n,J_n, R_n, D_n ,RH_n= ret.T
return (S_n, E_n,P_n,A_n, I_n,J_n, R_n, D_n, RH_n)
# End Models #
# Add dates #
def add_date_column(
df: pd.DataFrame, drop_day_column: bool = False, date_format: Optional[str] = None,
) -> pd.DataFrame:
"""Copies input data frame and converts "day" column to "date" column
    Assumes that day=0 is the model start date and allocates dates for each integer day.
    The day range need not be continuous.
    Columns are organized as in the original frame, with the difference that date
    columns come first.
    Arguments:
        df: The data frame to convert.
        drop_day_column: If true, the returned data frame will not have a day column.
        date_format: If given, converts datetime objects to the specified string format.
Raises:
KeyError: if "day" column not in df
ValueError: if "day" column is not of type int
"""
if not "day" in df:
raise KeyError("Input data frame for converting dates has no 'day column'.")
if not pd.api.types.is_integer_dtype(df.day):
raise KeyError("Column 'day' for dates converting data frame is not integer.")
df = df.copy()
# Prepare columns for sorting
non_date_columns = [col for col in df.columns if not col == "day"]
# Allocate (day) continous range for dates
n_days = int(df.day.max())
start = start_date
end = start + timedelta(days=n_days + 1)
# And pick dates present in frame
dates = pd.date_range(start=start, end=end, freq="D")[df.day.tolist()]
if date_format is not None:
dates = dates.strftime(date_format)
df["date"] = dates
if drop_day_column:
df.pop("day")
date_columns = ["date"]
else:
date_columns = ["day", "date"]
# sort columns
df = df[date_columns + non_date_columns]
return df
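# Small illustration of the day-to-date mapping above (added example; the start date is
# passed in explicitly here instead of being read from the app's global start_date).
def _example_add_dates(start: date) -> pd.DataFrame:
    df = pd.DataFrame({"day": [0, 1, 3], "hosp": [1.0, 2.0, 4.0]})
    dates = pd.date_range(start=start, end=start + timedelta(days=4), freq="D")[df.day.tolist()]
    df["date"] = dates
    return df[["day", "date", "hosp"]]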
# Adding ICU bed for Erie county
# 3/25/20
icu_county = 468
beds_county = 2762
# Bed expansion at 50%
expanded_icu_county_05 = 369
expanded_beds_county_05 = 3570
# Bed expansion at 100%
expanded_icu_county_1 = 492
expanded_beds_county_1 = 4760
# PPE Values
ppe_mild_val_lower = 14
ppe_mild_val_upper = 15
ppe_severe_val_lower = 15
ppe_severe_val_upper = 24
# List of Hospitals
hosp_list = ['kh', 'ecmc', 'chs', 'rpci']
groups = ['hosp', 'icu', 'vent']
# Hospital Bed Sharing Percentage
# ignore the first 3 numbers
data = {
'Kaleida' : [0.34, 0.34, 0.26, 0.38],
'ECMC': [0.14, 0.20, 0.17, 0.23],
'CHS': [0.21, 0.17, 0.18, 0.33],
'RPCI': [0.0, 0.09, 0.06, 0.05]
}
bed_share = pd.DataFrame(data)
url = 'https://raw.githubusercontent.com/gabai/stream_KH/master/Cases_Erie.csv'
erie_df = pd.read_csv(url)
erie_df['Date'] = | pd.to_datetime(erie_df['Date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from pandas.plotting import scatter_matrix
from sklearn import model_selection, preprocessing, svm
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import pickle
sp500 = | pd.read_csv(r'tests\SP500.csv', parse_dates=True, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 10:27:55 2021
@author: Raj
"""
import numpy as np
from .mechanical_drive import MechanicalDrive
from .utils.load import params_from_experiment as load_parm
from .utils.load import simulation_configuration as load_sim_config
from ffta.pixel_utils.load import configuration
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from scipy.signal import medfilt
from matplotlib import pyplot as plt
import pandas as pd
def cal_curve(can_path, param_cfg, taus_range=[], plot=True, **kwargs):
'''
Generates a calibration curve for a given cantilever given some particular
parameters.
Ideally you would have a tip parameters file as well.
Usage:
------
>>> param_cfg = 'path'
>>> can_params = 'path'
    >>> taus, tfp, spl = cal_curve(can_params, param_cfg)
>>> from matplotlib import pyplot as plt
>>> plt.plot(tfp, taus, 'bX-')
If you want to change the fit parameters per tau
    taus, tfp, spl = cal_curve(can_params, param_cfg, roi=0.001, n_taps=199)
    :param can_path: Path to the cantilever/tip parameters file
:type can_path: str
    :param param_cfg: Path to parameters.cfg file (from FFtrEFM experiment, in the data folder)
    :type param_cfg: string
:param taus_range: taus_range to set a range for the simulations, taken as [low, high]
:type taus_range: ndarray (2-index array), optional
:param plot: Plots the last taus vs tfps for verification
:type plot: bool, optional
:param kwargs:
:type kwargs:
:returns: tuple (taus, tfps, spl)
WHERE
ndarray taus is the single exponential taus that were simulated
ndarray tfps is the measured time to first peaks
UnivariateSpline spl is spline object of the calibration curve. To scale an image, type spl(x)
'''
if isinstance(can_path, str):
can_params, force_params, sim_params, _, parms = load_parm(can_path, param_cfg)
elif isinstance(can_path, tuple):
can_params, force_params, sim_params = load_sim_config(can_path)
_, parms = configuration(param_cfg)
can_params['drive_freq'] = parms['drive_freq']
can_params['res_freq'] = parms['drive_freq']
sim_params['trigger'] = parms['trigger']
sim_params['total_time'] = parms['total_time']
sim_params['sampling_rate'] = parms['sampling_rate']
_rlo = -7
_rhi = -3
    if taus_range:
        if len(taus_range) != 2 or (taus_range[1] <= taus_range[0]):
            raise ValueError('Range must be ascending and 2-items')
        _rlo = np.log10(taus_range[0])
        _rhi = np.log10(taus_range[1])
        # _rlo = np.floor(np.log10(taus_range[0]))
        # _rhi = np.ceil(np.log10(taus_range[1]))
taus = np.logspace(_rlo, _rhi, 50)
tfps = []
for t in taus:
force_params['tau'] = t
cant = MechanicalDrive(can_params, force_params, sim_params)
Z, _ = cant.simulate()
pix = cant.analyze(plot=False, **kwargs)
tfps.append(pix.tfp)
# sort the arrays
taus = taus[np.argsort(tfps)]
tfps = np.sort(tfps)
# Splines work better on shorter lengthscales
taus = np.log(taus)
tfps = np.log(tfps)
# Error corrections
# negative x-values (must be monotonic for spline)
dtfp = np.diff(tfps)
tfps = np.array(tfps)
taus = np.array(taus)
tfps = np.delete(tfps, np.where(dtfp < 0)[0])
taus = np.delete(taus, np.where(dtfp < 0)[0])
# "hot" pixels in the cal-curve
hotpixels = np.abs(taus - medfilt(taus))
taus = np.delete(taus, np.where(hotpixels > 0))
tfps = np.delete(tfps, np.where(hotpixels > 0))
# Negative slopes
neg_slope = np.diff(taus) / np.diff(tfps)
while any(np.where(neg_slope < 0)[0]):
tfps = np.delete(tfps, np.where(neg_slope < 0)[0])
taus = np.delete(taus, np.where(neg_slope < 0)[0])
neg_slope = np.diff(taus) / np.diff(tfps)
    # Infinite slopes (tfp saturation at long taus)
while (any(np.where(neg_slope == np.inf)[0])):
tfps = np.delete(tfps, np.where(neg_slope == np.inf)[0])
taus = np.delete(taus, np.where(neg_slope == np.inf)[0])
neg_slope = np.diff(taus) / np.diff(tfps)
try:
spl = ius(tfps, taus, k=4)
except:
print('=== Error generating cal-curve. Check manually ===')
spl = None
print(taus)
print(tfps)
if plot:
pix.plot()
fig, ax = plt.subplots(facecolor='white')
ax.loglog(np.exp(tfps), np.exp(taus), 'bX-')
try:
ax.loglog(np.exp(tfps), np.exp(spl(tfps)), 'r--')
except:
pass
ax.set_xlabel('$t_{fp}$ (s)')
ax.set_ylabel(r'$\tau$ (s)')
ax.set_title('Calibration curve')
# Save Calibration Curve
df = | pd.DataFrame(index=taus, data=tfps) | pandas.DataFrame |
from __future__ import print_function, division
import pdb
import unittest
import random
from collections import Counter
import pandas as pd
import numpy as np
from scipy.spatial import distance as dist
from scipy.spatial import distance
from sklearn.neighbors import NearestNeighbors as NN
def get_ngbr(df, knn):
rand_sample_idx = random.randint(0, df.shape[0] - 1)
parent_candidate = df.iloc[rand_sample_idx]
ngbr = knn.kneighbors(parent_candidate.values.reshape(1, -1), 3, return_distance=False)
candidate_1 = df.iloc[ngbr[0][0]]
candidate_2 = df.iloc[ngbr[0][1]]
candidate_3 = df.iloc[ngbr[0][2]]
return parent_candidate, candidate_2, candidate_3
def generate_samples(no_of_samples, df, df_name):
total_data = df.values.tolist()
knn = NN(n_neighbors=5, algorithm='auto').fit(df)
for _ in range(no_of_samples):
cr = 0.8
f = 0.8
parent_candidate, child_candidate_1, child_candidate_2 = get_ngbr(df, knn)
new_candidate = []
for key, value in parent_candidate.items():
if isinstance(parent_candidate[key], bool):
new_candidate.append(parent_candidate[key] if cr < random.random() else not parent_candidate[key])
elif isinstance(parent_candidate[key], str):
new_candidate.append(
random.choice([parent_candidate[key], child_candidate_1[key], child_candidate_2[key]]))
elif isinstance(parent_candidate[key], list):
temp_lst = []
for i, each in enumerate(parent_candidate[key]):
temp_lst.append(parent_candidate[key][i] if cr < random.random() else
int(parent_candidate[key][i] +
f * (child_candidate_1[key][i] - child_candidate_2[key][i])))
new_candidate.append(temp_lst)
else:
new_candidate.append(abs(parent_candidate[key] + f * (child_candidate_1[key] - child_candidate_2[key])))
total_data.append(new_candidate)
final_df = | pd.DataFrame(total_data) | pandas.DataFrame |
import logging
logger = logging.getLogger(__name__)
import autosklearn.metrics
import copy
import joblib
import numpy as np
import pandas as pd
import mlxtend.feature_selection
import networkx as nx
import sklearn.pipeline
import sklearn.preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted
import as_asl.as_asl_utils as as_asl_utils
import as_asl.as_asl_filenames as filenames
from as_asl.validate import Validator
import misc.automl_utils as automl_utils
from misc.nan_standard_scaler import NaNStandardScaler
import misc.parallel as parallel
import misc.utils as utils
class ASaslEnsemble:
def __init__(self, args, solvers, use_random_forests=False):
self.args = args
self.solvers = solvers
self.use_random_forests = use_random_forests
# train each of the regressors
def _fit_regressor(self, solver):
model = self.solver_asl_regressors[solver]
y_train = self.y_train[solver]
# we want to punish large errors, so use mse
metric = autosklearn.metrics.mean_squared_error
num_nan = np.isnan(self.X_train).sum()
num_inf = np.isinf(self.X_train).sum()
msg = ("[as_asl_ensemble._fit_regressor]: num_nan(X_train): {}".
format(num_nan))
logger.debug(msg)
msg = ("[as_asl_ensemble._fit_regressor]: num_inf(X_train): {}".
format(num_inf))
logger.debug(msg)
if self.use_random_forests:
model_fit = model.fit(self.X_train, y_train)
else:
model_fit = model.fit(self.X_train, y_train, metric=metric)
return (solver, model_fit)
def _fit_init(self, X_train, y_train):
# make sure we can encode our algorithm labels
self.le = sklearn.preprocessing.LabelEncoder()
self.le_ = self.le.fit(self.solvers)
# create the solver-specific datasets
self.y_train = {
solver: y_train[solver]
for solver in self.solvers
}
# and save the training dataset
if isinstance(X_train, pd.DataFrame):
self.X_train = X_train.values
else:
self.X_train = X_train
self.orig_y_train = y_train
return self
def _fit_regressors(self):
# create the regressors for each solver
if self.use_random_forests:
self.solver_asl_regressors = {
solver: RandomForestRegressor(
n_estimators=100
) for solver in self.solvers
}
else:
self.solver_asl_regressors = {
solver: automl_utils.AutoSklearnWrapper(
estimator_named_step="regressor", args=self.args
) for solver in self.solvers
}
# fit the regressors
ret = parallel.apply_parallel_iter(
self.solvers,
self.args.num_cpus,
self._fit_regressor
)
self.solver_asl_regressors_ = dict(ret)
return self
def _get_stacking_model_dataset_asl(self, X):
X_stacking_train = | pd.DataFrame() | pandas.DataFrame |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from keras.models import Model
from keras.layers import Dense, Embedding, Input , Activation
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GRU
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.layers import Flatten , Conv1D , GlobalMaxPooling1D , GlobalAveragePooling1D, MaxPooling1D
from keras.models import Sequential
import re , os
import logging, gensim , random
from gensim.models import word2vec
from keras.layers.merge import concatenate
# conf
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
max_features = 20000
#ax_features = 15000
######## ARMONY #####################################
# maxlen 200 (2x)
# EMBEDDING_DIM 100 (x) <---
# GRU 100 (layers = 1) (x)
# num_dense 100 (x)
#####################################################
maxlen = 600
EMBEDDING_DIM_1 = 300
we_fn_1='glove.840B.300d.txt'
EMBEDDING_DIM_2 = 200
we_fn_2='glove.twitter.27B.200d.txt'
#num_lstm = 300
lstm_layers = 1
rate_drop_dense = 0.1
num_dense = EMBEDDING_DIM_1 + EMBEDDING_DIM_2
batch_size = 32
epochs = 10
# load data
train = pd.read_csv("data/train.csv")
# pylint: disable-msg=E1101,W0613,W0603
import os
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas_datareader.io import read_jsdmx
class TestJSDMX(object):
def setup_method(self, method):
self.dirpath = tm.get_data_path()
def test_tourism(self):
# OECD -> Industry and Services -> Inbound Tourism
result = read_jsdmx(os.path.join(self.dirpath, 'jsdmx',
'tourism.json'))
assert isinstance(result, pd.DataFrame)
exp_col = pd.MultiIndex.from_product(
[['Japan'], ['China', 'Hong Kong, China',
'Total international arrivals',
'Total international receipts',
'International passenger transport receipts',
'International travel receipts',
'Korea', 'Chinese Taipei', 'United States']],
names=['Country', 'Variable'])
exp_idx = pd.DatetimeIndex(['2004', '2005', '2006', '2007',
'2008', '2009', '2010', '2011',
'2012'], name='Year')
values = np.array([
[616, 300, 6138, 1550, 330, 1220, 1588, 1081, 760],
[653, 299, 6728, 1710, 340, 1370, 1747, 1275, 822],
[812, 352, 7334, 1330, 350, 980, 2117, 1309, 817],
[942, 432, 8347, 1460, 360, 1100, 2601, 1385, 816],
[1000, 550, 8351, 1430, 310, 1120, 2382, 1390, 768],
[1006, 450, 6790, 1170, 210, 960, 1587, 1024, 700],
[1413, 509, 8611, 1350, 190, 1160, 2440, 1268, 727],
[1043, 365, 6219, 1000, 100, 900, 1658, 994, 566],
[1430, 482, 8368, 1300, 100, 1200, 2044, 1467, 717]])
expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_land_use(self):
# OECD -> Environment -> Resources Land Use
result = read_jsdmx(os.path.join(self.dirpath, 'jsdmx',
'land_use.json'))
assert isinstance(result, pd.DataFrame)
result = result.loc['2010':'2011']
exp_col = pd.MultiIndex.from_product([
['Japan', 'United States'],
['Arable land and permanent crops',
'Arable and cropland % land area',
'Total area', 'Forest', 'Forest % land area',
'Land area', 'Permanent meadows and pastures',
'Meadows and pastures % land area', 'Other areas',
'Other % land area']], names=['Country', 'Variable'])
exp_idx = pd.DatetimeIndex(['2010', '2011'], name='Year')
values = np.array([[45930, 12.601, 377950, 249790, 68.529, 364500,
np.nan, np.nan, 68780, 18.87, 1624330, 17.757,
9831510, 3040220, 33.236, 9147420, 2485000,
27.166, 1997870, 21.841],
[45610, 12.513, 377955, 249878, 68.554, 364500,
np.nan, np.nan, 69012, 18.933, 1627625, 17.793,
9831510, 3044048, 33.278, 9147420, 2485000,
27.166, 1990747, 21.763]])
        expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
        tm.assert_frame_equal(result, expected)
"""
Written by <NAME>, 22-10-2018
This script contains functions for data formatting and accuracy assessment of keras models
"""
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import keras.backend as K
from math import sqrt
import numpy as np
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
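# Illustrative usage sketch (hypothetical values, not from any dataset used below):
# framing two variables with one lag and one forecast step,
#
#   series_to_supervised(np.array([[1, 10], [2, 20], [3, 30]]), n_in=1, n_out=1)
#
# yields columns ['var1(t-1)', 'var2(t-1)', 'var1(t)', 'var2(t)'] with the
# all-NaN first lag row dropped, leaving two supervised samples.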
# model cost function
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
# scale and format observed data as train/test inputs/labels
def format_obs_data(full_data, n_lags, n_ahead, n_train):
# split datetime column into train and test for plots
train_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
values = full_data[['GWL', 'Tide', 'Precip.']].values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_fit = gwl_scaler.fit(gwl)
gwl_scaled = gwl_fit.transform(gwl)
tide_fit = tide_scaler.fit(tide)
tide_scaled = tide_fit.transform(tide)
rain_fit = rain_scaler.fit(rain)
rain_scaled = rain_fit.transform(rain)
# frame as supervised learning
gwl_super = series_to_supervised(gwl_scaled, n_lags, n_ahead)
gwl_super_values = gwl_super.values
tide_super = series_to_supervised(tide_scaled, n_lags, n_ahead)
tide_super_values = tide_super.values
rain_super = series_to_supervised(rain_scaled, n_lags, n_ahead)
rain_super_values = rain_super.values
# split groundwater into inputs and labels
gwl_input, gwl_labels = gwl_super_values[:, 0:n_lags+1], gwl_super_values[:, n_lags+1:]
# split into train and test sets
train_X = np.concatenate((gwl_input[:n_train, :], tide_super_values[:n_train, :], rain_super_values[:n_train, :]),
axis=1)
test_X = np.concatenate((gwl_input[n_train:, :], tide_super_values[n_train:, :], rain_super_values[n_train:, :]),
axis=1)
train_y, test_y = gwl_labels[:n_train, :], gwl_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_dates, test_dates, tide_fit, rain_fit, gwl_fit, train_X, test_X, train_y, test_y
# scale and format storm data as train/test inputs/labels
def format_storm_data(storm_data, n_train, tide_fit, rain_fit, gwl_fit):
# separate storm data into gwl, tide, and rain
storm_scaled = pd.DataFrame(storm_data["Datetime"])
for col in storm_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
storm_scaled[col] = col_scaled
# split storm data into inputs and labels
storm_values = storm_scaled[storm_scaled.columns[1:]].values
storm_input, storm_labels = storm_values[:, :-18], storm_values[:, -18:]
# split into train and test sets
train_X, test_X = storm_input[:n_train, :], storm_input[n_train:, :]
train_y, test_y = storm_labels[:n_train, :], storm_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_X, test_X, train_y, test_y
# scale and format forecast data as train/test inputs/labels
def format_fcst_data(fcst_data, tide_fit, rain_fit, gwl_fit):
# separate forecast data into gwl, tide, and rain
fcst_scaled = pd.DataFrame(fcst_data["Datetime"])
for col in fcst_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
fcst_scaled[col] = col_scaled
# split fcst data into inputs and labels
fcst_values = fcst_scaled[fcst_scaled.columns[1:]].values
fcst_input, fcst_labels = fcst_values[:, :-18], fcst_values[:, -18:]
# reshape fcst input to be 3D [samples, timesteps, features]
fcst_test_X = fcst_input.reshape((fcst_input.shape[0], 1, fcst_input.shape[1]))
print("forecast input data shape:", fcst_test_X.shape, "forecast label data shape:", fcst_labels.shape)
return fcst_test_X, fcst_labels
# create df of full observed data and predictions and extract storm data
def full_pred_df(test_dates, storm_data, n_lags, n_ahead, inv_y, inv_yhat):
dates_t1 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 1:-n_ahead + 2])
dates_t1 = dates_t1.reset_index(inplace=False, drop=True)
dates_9 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 9:-n_ahead + 10])
dates_9 = dates_9.reset_index(inplace=False, drop=True)
dates_18 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 18:])
dates_18 = dates_18.reset_index(inplace=False, drop=True)
obs_t1 = np.reshape(inv_y[:, 0], (inv_y.shape[0], 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (inv_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["Obs. GWL t+1", "Pred. GWL t+1"])
df_t1 = pd.concat([df_t1, dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
obs_t9 = np.reshape(inv_y[:, 8], (inv_y.shape[0], 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (inv_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["Obs. GWL t+9", "Pred. GWL t+9"])
df_t9 = pd.concat([df_t9, dates_9], axis=1)
df_t9 = df_t9.set_index("Datetime")
obs_t18 = np.reshape(inv_y[:, 17], (inv_y.shape[0], 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (inv_y.shape[0], 1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = pd.DataFrame(df_t18, index=None, columns=["Obs. GWL t+18", "Pred. GWL t+18"])
df_t18 = pd.concat([df_t18, dates_18], axis=1)
df_t18 = df_t18.set_index("Datetime")
storm_dates_t1 = storm_data[['gwl(t+1)']]
storm_dates_t1.index = storm_dates_t1.index + pd.DateOffset(hours=1)
storm_dates_t9 = storm_data[['gwl(t+9)']]
storm_dates_t9.index = storm_dates_t9.index + pd.DateOffset(hours=9)
storm_dates_t18 = storm_data[['gwl(t+18)']]
storm_dates_t18.index = storm_dates_t18.index + pd.DateOffset(hours=18)
df_t1_storms = np.asarray(df_t1[df_t1.index.isin(storm_dates_t1.index)])
df_t9_storms = np.asarray(df_t9[df_t9.index.isin(storm_dates_t9.index)])
df_t18_storms = np.asarray(df_t18[df_t18.index.isin(storm_dates_t18.index)])
storms_list = [df_t1_storms, df_t9_storms, df_t18_storms]
return df_t1, df_t9, df_t18, storms_list
# create df of storm observed data and predictions
def storm_pred_df(storm_data, n_train, inv_y, inv_yhat):
test_dates_t1 = storm_data[['Datetime', 'tide(t+1)', 'rain(t+1)']].iloc[n_train:]
test_dates_t1 = test_dates_t1.reset_index(drop=True)
test_dates_t1['Datetime'] = pd.to_datetime(test_dates_t1['Datetime'])
test_dates_t1['Datetime'] = test_dates_t1['Datetime'] + pd.DateOffset(hours=1)
test_dates_t9 = storm_data[['Datetime', 'tide(t+9)', 'rain(t+9)']].iloc[n_train:]
test_dates_t9 = test_dates_t9.reset_index(drop=True)
test_dates_t9['Datetime'] = pd.to_datetime(test_dates_t9['Datetime'])
test_dates_t9['Datetime'] = test_dates_t9['Datetime'] + pd.DateOffset(hours=9)
test_dates_t18 = storm_data[['Datetime', 'tide(t+18)', 'rain(t+18)']].iloc[n_train:]
test_dates_t18 = test_dates_t18.reset_index(drop=True)
test_dates_t18['Datetime'] = pd.to_datetime(test_dates_t18['Datetime'])
test_dates_t18['Datetime'] = test_dates_t18['Datetime'] + pd.DateOffset(hours=18)
obs_t1 = np.reshape(inv_y[:, 0], (inv_y.shape[0], 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (inv_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["obs", "pred"])
df_t1 = pd.concat([df_t1, test_dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
df_t1 = df_t1.rename(columns={'obs': 'Obs. GWL t+1', 'pred': 'Pred. GWL t+1'})
obs_t9 = np.reshape(inv_y[:, 8], (inv_y.shape[0], 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (inv_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["obs", "pred"])
df_t9 = pd.concat([df_t9, test_dates_t9], axis=1)
df_t9 = df_t9.set_index("Datetime")
df_t9 = df_t9.rename(columns={'obs': 'Obs. GWL t+9', 'pred': 'Pred. GWL t+9'})
obs_t18 = np.reshape(inv_y[:, 17], (inv_y.shape[0], 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (inv_y.shape[0], 1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
    df_t18 = pd.DataFrame(df_t18, index=None, columns=["obs", "pred"])
    df_t18 = pd.concat([df_t18, test_dates_t18], axis=1)
    df_t18 = df_t18.set_index("Datetime")
    df_t18 = df_t18.rename(columns={'obs': 'Obs. GWL t+18', 'pred': 'Pred. GWL t+18'})
    return df_t1, df_t9, df_t18
import os
import json
import numpy as np
import pandas as pd
from copy import copy
import matplotlib.pyplot as plt
from abc import abstractmethod
from IPython.display import display, display_markdown
from .utils import load_parquet, Position
from common_utils_dev import make_dirs
from collections import OrderedDict, defaultdict
import empyrical as emp
from common_utils_dev import to_parquet
CONFIG = {
"report_prefix": "v001",
"detail_report": False,
"position_side": "longshort",
"entry_ratio": 0.055,
"commission": {"entry": 0.0004, "exit": 0.0002, "spread": 0.0004},
"min_holding_minutes": 1,
"max_holding_minutes": 30,
"compound_interest": True,
"order_criterion": "capital",
"possible_in_debt": False,
"exit_if_achieved": True,
"achieve_ratio": 1,
"achieved_with_commission": False,
"max_n_updated": 0,
"positive_entry_threshold": 8,
"negative_entry_threshold": 8,
"exit_threshold": "auto",
"positive_probability_threshold": 8,
"negative_probability_threshold": 8,
"adjust_prediction": False,
}
def make_flat(series):
flatten = []
for key, values in series.to_dict().items():
if isinstance(values, list):
for value in values:
flatten.append(pd.Series({key: value}))
else:
flatten.append(pd.Series({key: values}))
return pd.concat(flatten).sort_index()
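# Example of the flattening behaviour (hypothetical values): entries that are
# lists are expanded so each element gets its own row under the original label,
#
#   make_flat(pd.Series({0: [0.01, -0.02], 1: 0.005}))
#
# returns a Series with index [0, 0, 1] and values [0.01, -0.02, 0.005].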
class BasicBacktester:
def __init__(
self,
base_currency,
dataset_dir,
exp_dir,
report_prefix=CONFIG["report_prefix"],
detail_report=CONFIG["detail_report"],
position_side=CONFIG["position_side"],
entry_ratio=CONFIG["entry_ratio"],
commission=CONFIG["commission"],
min_holding_minutes=CONFIG["min_holding_minutes"],
max_holding_minutes=CONFIG["max_holding_minutes"],
compound_interest=CONFIG["compound_interest"],
order_criterion=CONFIG["order_criterion"],
possible_in_debt=CONFIG["possible_in_debt"],
exit_if_achieved=CONFIG["exit_if_achieved"],
achieve_ratio=CONFIG["achieve_ratio"],
achieved_with_commission=CONFIG["achieved_with_commission"],
max_n_updated=CONFIG["max_n_updated"],
positive_entry_threshold=CONFIG["positive_entry_threshold"],
negative_entry_threshold=CONFIG["negative_entry_threshold"],
exit_threshold=CONFIG["exit_threshold"],
positive_probability_threshold=CONFIG["positive_probability_threshold"],
negative_probability_threshold=CONFIG["negative_probability_threshold"],
adjust_prediction=CONFIG["adjust_prediction"],
):
assert position_side in ("long", "short", "longshort")
self.base_currency = base_currency
self.report_prefix = report_prefix
self.detail_report = detail_report
self.position_side = position_side
self.entry_ratio = entry_ratio
self.commission = commission
self.min_holding_minutes = min_holding_minutes
self.max_holding_minutes = max_holding_minutes
self.compound_interest = compound_interest
self.order_criterion = order_criterion
assert self.order_criterion in ("cache", "capital")
self.possible_in_debt = possible_in_debt
self.exit_if_achieved = exit_if_achieved
self.achieve_ratio = achieve_ratio
self.achieved_with_commission = achieved_with_commission
self.max_n_updated = max_n_updated
self.positive_entry_threshold = positive_entry_threshold
self.negative_entry_threshold = negative_entry_threshold
self.exit_threshold = exit_threshold
assert isinstance(exit_threshold, (float, int, str))
if type(exit_threshold) == str:
assert (exit_threshold == "auto") or ("*" in exit_threshold)
self.positive_probability_threshold = positive_probability_threshold
self.negative_probability_threshold = negative_probability_threshold
self.adjust_prediction = adjust_prediction
self.dataset_dir = dataset_dir
self.exp_dir = exp_dir
self.initialize()
def _load_prediction_abs_bins(self):
return load_parquet(
path=os.path.join(
self.exp_dir, "generated_output/prediction_abs_bins.parquet.zstd"
)
)
def _load_probability_bins(self):
return load_parquet(
path=os.path.join(
self.exp_dir, "generated_output/probability_bins.parquet.zstd"
)
)
def _build_historical_data_dict(self, base_currency, historical_data_path_dict):
historical_data_path_dict = copy(historical_data_path_dict)
data_dict = {}
        # We use open prices for handling trades: entry at open, exit at open
data_dict["pricing"] = (
load_parquet(path=historical_data_path_dict.pop("pricing"))
.xs("open", axis=1, level=1)
.astype("float16")
)
columns = data_dict["pricing"].columns
columns_with_base_currency = columns[
columns.str.endswith(base_currency.upper())
]
data_dict["pricing"] = data_dict["pricing"][columns_with_base_currency]
for data_type, data_path in historical_data_path_dict.items():
data_dict[data_type] = load_parquet(path=data_path).astype("float16")
# Filter by base_currency
data_dict[data_type] = data_dict[data_type][columns_with_base_currency]
return data_dict
def _set_bins(self, prediction_abs_bins, probability_bins, index):
assert (prediction_abs_bins >= 0).all().all()
assert (probability_bins >= 0).all().all()
self.positive_entry_bins = None
self.negative_entry_bins = None
self.exit_bins = None
self.positive_probability_bins = None
self.negative_probability_bins = None
if isinstance(self.positive_entry_threshold, str):
if "*" in self.positive_entry_threshold:
self.positive_entry_bins = (
prediction_abs_bins.loc[
int(self.positive_entry_threshold.split("*")[0])
]
* float(self.positive_entry_threshold.split("*")[-1])
)[index]
else:
self.positive_entry_bins = prediction_abs_bins.loc[
self.positive_entry_threshold
][index]
if isinstance(self.negative_entry_threshold, str):
if "*" in self.negative_entry_threshold:
self.negative_entry_bins = -(
prediction_abs_bins.loc[
int(self.negative_entry_threshold.split("*")[0])
]
* float(self.negative_entry_threshold.split("*")[-1])
)[index]
else:
self.negative_entry_bins = -prediction_abs_bins.loc[
self.negative_entry_threshold
][index]
if isinstance(self.exit_threshold, str):
if "*" in self.exit_threshold:
self.exit_bins = (
prediction_abs_bins.loc[int(self.exit_threshold.split("*")[0])]
* float(self.exit_threshold.split("*")[-1])
)[index]
else:
self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index]
if isinstance(self.positive_probability_threshold, str):
if "*" in self.positive_probability_threshold:
self.positive_probability_bins = (
probability_bins.loc[
int(self.positive_probability_threshold.split("*")[0])
]
* float(self.positive_probability_threshold.split("*")[-1])
)[index]
else:
self.positive_probability_bins = probability_bins.loc[
self.positive_probability_threshold
][index]
if isinstance(self.negative_probability_threshold, str):
if "*" in self.negative_probability_threshold:
self.negative_probability_bins = (
probability_bins.loc[
int(self.negative_probability_threshold.split("*")[0])
]
* float(self.negative_probability_threshold.split("*")[-1])
)[index]
else:
self.negative_probability_bins = probability_bins.loc[
self.negative_probability_threshold
][index]
def build(self):
self.report_store_dir = os.path.join(self.exp_dir, "reports/")
make_dirs([self.report_store_dir])
self.historical_data_dict = self._build_historical_data_dict(
base_currency=self.base_currency,
historical_data_path_dict={
"pricing": os.path.join(self.dataset_dir, "test/pricing.parquet.zstd"),
"predictions": os.path.join(
self.exp_dir, "generated_output/predictions.parquet.zstd"
),
"probabilities": os.path.join(
self.exp_dir, "generated_output/probabilities.parquet.zstd"
),
"labels": os.path.join(
self.exp_dir, "generated_output/labels.parquet.zstd"
),
},
)
self.tradable_coins = self.historical_data_dict["predictions"].columns
self.index = (
self.historical_data_dict["predictions"].index
& self.historical_data_dict["pricing"].index
).sort_values()
for key in self.historical_data_dict.keys():
self.historical_data_dict[key] = self.historical_data_dict[key].reindex(
self.index
)
prediction_abs_bins = self._load_prediction_abs_bins()
probability_bins = self._load_probability_bins()
self._set_bins(
prediction_abs_bins=prediction_abs_bins,
probability_bins=probability_bins,
index=self.tradable_coins,
)
def initialize(self):
self.historical_caches = {}
self.historical_capitals = {}
self.historical_trade_returns = defaultdict(list)
if self.detail_report is True:
self.historical_entry_reasons = defaultdict(list)
self.historical_exit_reasons = defaultdict(list)
self.historical_profits = defaultdict(list)
self.historical_positions = {}
self.positions = []
self.cache = 1
def report(self, value, target, now, append=False):
if hasattr(self, target) is False:
return
if append is True:
getattr(self, target)[now].append(value)
return
assert now not in getattr(self, target)
getattr(self, target)[now] = value
def generate_report(self):
historical_caches = pd.Series(self.historical_caches).rename("cache")
historical_capitals = pd.Series(self.historical_capitals).rename("capital")
historical_returns = (
pd.Series(self.historical_capitals)
.pct_change(fill_method=None)
.fillna(0)
.rename("return")
)
historical_trade_returns = pd.Series(self.historical_trade_returns).rename(
"trade_return"
)
report = [
historical_caches,
historical_capitals,
historical_returns,
historical_trade_returns,
]
if self.detail_report is True:
historical_entry_reasons = pd.Series(self.historical_entry_reasons).rename(
"entry_reason"
)
historical_exit_reasons = pd.Series(self.historical_exit_reasons).rename(
"exit_reason"
)
historical_profits = pd.Series(self.historical_profits).rename("profit")
historical_positions = pd.Series(self.historical_positions).rename(
"position"
)
report += [
historical_entry_reasons,
historical_exit_reasons,
historical_profits,
historical_positions,
]
report = pd.concat(report, axis=1).sort_index()
report.index = pd.to_datetime(report.index)
return report
def store_report(self, report):
metrics = self.build_metrics().to_frame().T
to_parquet(
df=metrics.astype("float32"),
path=os.path.join(
self.report_store_dir,
f"metrics_{self.report_prefix}_{self.base_currency}.parquet.zstd",
),
)
to_parquet(
df=report,
path=os.path.join(
self.report_store_dir,
f"report_{self.report_prefix}_{self.base_currency}.parquet.zstd",
),
)
params = {
"base_currency": self.base_currency,
"position_side": self.position_side,
"entry_ratio": self.entry_ratio,
"commission": self.commission,
"min_holding_minutes": self.min_holding_minutes,
"max_holding_minutes": self.max_holding_minutes,
"compound_interest": self.compound_interest,
"order_criterion": self.order_criterion,
"possible_in_debt": self.possible_in_debt,
"achieved_with_commission": self.achieved_with_commission,
"max_n_updated": self.max_n_updated,
"tradable_coins": tuple(self.tradable_coins.tolist()),
"exit_if_achieved": self.exit_if_achieved,
"achieve_ratio": self.achieve_ratio,
"positive_entry_threshold": self.positive_entry_threshold,
"negative_entry_threshold": self.negative_entry_threshold,
"exit_threshold": self.exit_threshold,
"positive_probability_threshold": self.positive_probability_threshold,
"negative_probability_threshold": self.negative_probability_threshold,
"adjust_prediction": self.adjust_prediction,
}
with open(
os.path.join(
self.report_store_dir,
f"params_{self.report_prefix}_{self.base_currency}.json",
),
"w",
) as f:
json.dump(params, f)
print(f"[+] Report is stored: {self.report_prefix}_{self.base_currency}")
def build_metrics(self):
assert len(self.historical_caches) != 0
assert len(self.historical_capitals) != 0
assert len(self.historical_trade_returns) != 0
historical_returns = (
pd.Series(self.historical_capitals).pct_change(fill_method=None).fillna(0)
)
        historical_trade_returns = make_flat(
            pd.Series(self.historical_trade_returns)
        )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 12:07:56 2020
@author: B.Mika-Gospdoorz
Input files: .tsv quantification table with combined results from all samples
.tsv file with annotations extracted from gff using extract_annotations_from_gff.py
Output file: *combined_quant_gene_level_annotations.tsv with gene annotations and quantification results
Description: Used to combine annotations with quantification results
"""
import argparse
import pandas as pd
# function to combine annotations with quantification results
def combine_annotations_quant(quantification_table, annotations_table, gene_attribute, organism):
# read quantification results
col_names = pd.read_csv(quantification_table, sep = '\t', nrows=0).columns
types_dict = {gene_attribute: str}
types_dict.update({col: float for col in col_names if col not in types_dict})
quantification = pd.read_csv(quantification_table,sep="\t",index_col=0, dtype = types_dict)
# read annotations
    annotations = pd.read_csv(annotations_table, sep="\t", index_col=0, dtype='str')
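# A hypothetical invocation of the helper above (the file names and gene
# attribute are placeholders, not taken from the original pipeline):
#
#   combined = combine_annotations_quant(
#       "combined_quant_gene_level.tsv", "annotations.tsv",
#       gene_attribute="gene_id", organism="host")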
######## EPAUNI
import re
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# fn is assumed to hold the path to the EPAUNI output listing parsed below
with open(fn, 'r') as file:
list_lines = [line for line in file.readlines() if line.strip()]
# %%
list_time_ix=[]
regex = '[+-]?[0-9]+\.?[0-9]*'
for ix, line in enumerate(list_lines):
if 'TIME' in line:
list_time_ix.append((ix, int(float(re.findall(regex, line)[0]) ) ) )
# %% helper functions
#segment 1 with different format
def linesegment1(in_line):
seg1_str = in_line.strip()
seg1_list = re.split('\s+', seg1_str)
name_waste = seg1_list[0]
seg1_list=seg1_list[1:]
for ix, val in enumerate(seg1_list):
seg1_list[ix]=float(val.replace('D','E'))
return(name_waste, seg1_list)
def linesegment2(in_line):
seg2_str = in_line.strip()
seg2_list = re.split('\s+', seg2_str)
_ = seg2_list[0:2]
seg2_list=seg2_list[2:]
for ix, val in enumerate(seg2_list):
seg2_list[ix]=float(val.replace('D','E'))
return(seg2_list)
# %%
lines_start = [a + 9 for a, b in list_time_ix]
lines_increment = [ a - c for (a, b), (c, d) in zip(list_time_ix[1:], list_time_ix[0:-1])]
lines_increment.append(lines_increment[-1])
lines_end = [ a+b -14 for a, b in zip(lines_start, lines_increment)]
# %%
colnames = ['AM241','CM244','PU238','PU239','PU240','PU241','U234','CS137',
'SR90','U233','Total','EPAUnit']
dict_Time={}
for ix, (_, key_time) in enumerate(list_time_ix):
a=lines_start[ix]
b=lines_end[ix]
segment1 = list_lines[a:b:2]
segment2 = list_lines[a+1:b+1:2]
waste_name=[]
waste_rec=[]
for line_seg1, line_seg2 in zip(segment1, segment2):
wn, wval1 =linesegment1(line_seg1)
wval2 = linesegment2(line_seg2)
waste_name.append(wn)
waste_rec.append(wval1+wval2)
temp = pd.DataFrame(waste_rec, columns=colnames)
temp.index = waste_name
dict_Time[key_time] = temp
# %% Plotting
fig, ax = plt.subplots(ncols=2, nrows=5, figsize=(12, 24), dpi=100, constrained_layout=True)
for ax, key in zip(ax.flatten(), dict_Time):
corr = dict_Time[key].corr()
ax.set_title('Correlations CH Waste Stream {} Yrs'.format(key))
hm = sns.heatmap(corr, cmap='coolwarm', annot=True, annot_kws={"size": 8,'color':'k'},
ax=ax)
################################################################
######## DBR ##################################################
import os
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import re
import argparse
from scipy.interpolate import interp2d
pd.options.mode.chained_assignment = None # default='warn'
def main():
#0. Helper Functions
    # Generate a list of filenames in a directory path
    def list_filenames(path_input, prefix='', suffix=''):
        # if the input is a WindowsPath, convert it to a string
if not isinstance(path_input,str):
path_input=path_input.__str__()
print(path_input)
file_paths = sorted(
[
os.path.join(path_input, fname)
for fname in os.listdir(path_input)
if fname.startswith(prefix) and fname.endswith(suffix)
])
return(file_paths)
#read summarized panel output file and write a dataframe
def read_concentration_tbl(fn, colnames):
df = pd.read_csv(fn, header=0, names=colnames, skiprows=2, sep='\s+')
#make key
list_str = fn.split('/')[-1][:-4].split('_')
key = list_str[3:]
tmp = '_'
key = tmp.join(key)
df['fn']=key
return(df)
#read summarized dbr output file and write a dataframe
def read_dbr_tbl(fn, colnames):
df = pd.read_csv(fn, header=0, names=colnames, skiprows=2, sep='\s+')
dbr_analysis = fn.split('/')[-1][:-4].split('_')[2]
dbr_event = fn.split('/')[-1][:-4].split('_')[3:]
dbr_str = '_'
dbr_event = dbr_str.join(dbr_event)
dbr_tevent = fn.split('/')[-1][:-4].split('_')[-2][1:]
df['Analysis']=dbr_analysis
df['DBR_Event']=dbr_event
df['Event_Time'] = int(dbr_tevent)
return(df)
##########################################################
###########Core Interpolation Function ###################
##########################################################
def interp_nuclide_time_vol(df_in, str_nuclide):
#Inputs dataframe of concentration data and nuclide name
#Meant to run in a loop of the other nuclides and return
#A dictionary entry
df = df_in[['VectorName','time','PANDFVOL',str_nuclide]]
list_times = df['time'].unique()
list_brvol = df['PANDFVOL'].unique()
grid_T, grid_Br = np.meshgrid(list_times, list_brvol)
#II. Initialize dictionaries and arrays
dict_2dInterp={} #hold interpolation function
        # My notation for 2d interpolation is (X, Y), Z
Z=np.zeros_like(grid_Br) # actuals
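        # Z is laid out as Z[brine_volume_index, time_index], matching the
        # (grid_T, grid_Br) meshgrid above; values are interpolated in log10
        # space, presumably because concentrations span many orders of magnitude.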
#III. Calculate interpolation function for each vector
list_vectorNames = list(df['VectorName'].unique())
for px, key in enumerate(list_vectorNames):
df_v=df[df['VectorName'].isin([key])]
#populate Z values of times, brine volumes
for ix, time in enumerate(list_times):
for iy, brvol in enumerate(list_brvol):
#retrieve the concentration value to build a 2D table
                    Z[iy, ix] = np.log10(df_v.loc[(df_v['time'] == time) & (df_v['PANDFVOL'] == brvol)][str_nuclide].values.item(0))
f=interp2d(grid_T, grid_Br, Z, kind='linear')
#Print every 25th vector as a checkpoint
if (px+1)%25==0:
                print('Nuclide = {}, Vector = {}'.format(str_nuclide, key))
dict_2dInterp[key]=f
        print('Interpolation Functions for Nuclide {} Complete'.format(str_nuclide))
return(dict_2dInterp)
#I. Input Arguments and Files
parser = argparse.ArgumentParser()
parser.add_argument('--dir_CON', type=str, default='CON_CLC_CRA19', help='Directory with Panel Concentration Data')
parser.add_argument('--clc_pre', type=str, default='panel_con_clc_CRA19_', help='concentration files starts with strings')
parser.add_argument('--clc_ext', type=str, default='.tbl', help='file should be .tbl format')
parser.add_argument('--dir_DBR', type=str, default='sum_DBR', help='Directory with DBR summarize output')
parser.add_argument('--dbr_pre', type=str, default='sum_dbr_CRA19_', help='dbr file starts with string')
parser.add_argument('--dbr_ext', type=str, default='.tbl', help='file should be .tbl format')
# Outputs
parser.add_argument('--fn_output',type=str, default='DBR_Stats.csv', help='file name of .csv file with vector mean, medians')
#Options
parser.add_argument('--reps', nargs='+', type=str, default = ['r1', 'r2', 'r3'], help='replicates calculation')
parser.add_argument('--scen', type=str, default='s2', help='expecting one scenario like s1 or s2')
# Spacer
opt = parser.parse_args()
print(opt)
#Panel Output Concentration Files units in ci/m3
con_folder = opt.dir_CON
assert os.path.exists(con_folder)
print('Success Found Vector File = {}\n'.format(con_folder) )
#DBR Output Release units in m3
dbr_folder = opt.dir_DBR
assert os.path.exists(dbr_folder)
print('Success Found Vector File = {}\n'.format(dbr_folder) )
#list of filenames in concentration directory
fn_panel_con = list_filenames(con_folder, prefix=opt.clc_pre,
suffix=opt.clc_ext)
#list of filenames in dbr directory
fn_dbr = list_filenames(dbr_folder, prefix=opt.dbr_pre, suffix=opt.dbr_ext)
#II. Panel Concentration Dataframe- CLC curries/m3 lumped concentration (Panel Output)
colnames = ['vector','time','PANDFVOL','CLCAM241','CLCPU239','CLCPU238',
'CLCU234','CLCTH230']
list_nuclides=['CLCAM241','CLCPU239','CLCPU238','CLCU234','CLCTH230']
# %% dataframe of concentrations ([ci/m3])
df_con = read_concentration_tbl(fn=fn_panel_con[0],colnames=colnames)
for fn in fn_panel_con[1:]:
df_con = pd.concat([df_con,read_concentration_tbl(fn=fn,colnames=colnames) ])
#II.A. Concentration Column Tags
df_con['CLCTOT']=df_con[['CLCAM241','CLCPU239','CLCPU238','CLCU234','CLCTH230']].sum(axis=1)
df_con['Replicate'] = df_con['fn'].apply(lambda x: x.split('_')[-2])
df_con['Scenario'] = df_con['fn'].apply(lambda x: x.split('_')[-1])
df_con['VectorName'] = df_con.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
#filter only selected scenario
df_con=df_con[df_con['Scenario'].isin([opt.scen])]
df_con=df_con[df_con['Replicate'].isin(opt.reps)]
print(df_con.head(3))
######################
#II.B. Dictionary of interpolation functions by nuclide
dict_Func={}
for str_name in list_nuclides:
kwargs={'df_in':df_con, 'str_nuclide':str_name}
dict_Func[str_name]=interp_nuclide_time_vol(**kwargs)
#III. DBR Volumes
colnames = ['vector','DBR_Time','BRIN_REL','PNLBRVOL']
df_dbr = read_dbr_tbl(fn=fn_dbr[0],colnames=colnames)
for fn in fn_dbr[1:]:
df_dbr = pd.concat([df_dbr, read_dbr_tbl(fn=fn, colnames=colnames)])
#III.A. DBR Column Tags
df_dbr['Replicate'] = df_dbr['DBR_Event'].apply(lambda x: x.split('_')[0])
df_dbr['Scenario'] = df_dbr['DBR_Event'].apply(lambda x: x.split('_')[1])
df_dbr['VectorName'] = df_dbr.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
#filter only selected scenario
df_dbr=df_dbr[df_dbr['Scenario'].isin([opt.scen])]
df_dbr=df_dbr[df_dbr['Replicate'].isin(opt.reps)]
print(df_dbr.head(3) )
#IV. Check dbr and concentration vectors match
assert set(df_dbr['VectorName']) == set(df_con['VectorName'])
#V. Use 2d interpolation functions to interpolate actinide concentrations [ci/m3]
dict_DBR = {}
for nuclide in list_nuclides:
#Fetch the interpolation function dictionary
dict_2dInterp=dict_Func[nuclide]
str_name = nuclide+'_Interp'
#Perform interpolation here
df_dbr[str_name]=df_dbr.apply(lambda x: 10**(dict_2dInterp[x.VectorName](x.Event_Time,x.PNLBRVOL).item(0)),axis=1)
temp = df_dbr[['VectorName','Event_Time', str_name, 'BRIN_REL']]
#'Convolution' Step here
temp['Curries']=temp['BRIN_REL']*temp[str_name]
#Create multi-index series by time, statistic
dict_DBR[nuclide]= pd.DataFrame({'Mean':temp.groupby(['Event_Time'])['Curries'].mean(),
'Median':temp.groupby(['Event_Time'])['Curries'].median(),
'Count':temp.groupby(['Event_Time'])['Curries'].size()}).stack()
print(str_name)
df_out = pd.DataFrame(dict_DBR).to_csv(opt.fn_output)
#VI. Stats and Plotting
df_dbr['Curries_Tot']=df_dbr[[nuclide+'_Interp' for nuclide in list_nuclides]].sum(axis=1) * df_dbr['BRIN_REL']
dbr_mean = df_dbr.groupby(['Event_Time'])['Curries_Tot'].mean()
dbr_count = df_dbr.groupby(['Event_Time'])['Curries_Tot'].size()
#VII. Generate Plot
#plot preliminaries
df_plot=pd.pivot_table(df_dbr, index=['VectorName'], columns=['Event_Time'], values=['Curries_Tot'])
df_plot.columns = [col[1] for col in df_plot.columns]
#labels for plot
label_mean = ["Mean \n {}".format(round(val,2)) for val in dbr_mean]
label_count = ["N={}".format(val) for val in dbr_count]
label_level = [175, 175, 175, 175, 175]
label_series = ['DBR Time\n'+str(x) for x in df_plot.columns]
fig, ax1 = plt.subplots(figsize=(5,5), dpi=150)
ax1.boxplot([df_plot[550], df_plot[750], df_plot[2000], df_plot[4000],df_plot[10000]])
ax1.plot(ax1.get_xticks(),dbr_mean, marker='^', markersize=10, markerfacecolor='orange',
markeredgecolor='orange', linewidth=0)
# Label Means
for xtick in ax1.get_xticks():
ax1.text(xtick,dbr_mean.iloc[xtick-1],label_mean[xtick-1],horizontalalignment='center',
size='medium',weight='semibold')
# Label Counts
for xtick in ax1.get_xticks():
ax1.text(xtick,label_level[xtick-1],label_count[xtick-1],horizontalalignment='center',
size='medium',weight='semibold')
ax1.set_xticklabels(label_series, fontsize=10, rotation=0)
ax1.set_ylabel('Release [Ci]', weight='semibold')
ax1.set_title('Scenario {} Radionuclide Releases\n Reps {}'.format(opt.scen, opt.reps), weight='bold')
plt.savefig('Scenario_{}_Radionuclide_Releases'.format(opt.scen))
if __name__ == '__main__':
main()
###########################################################
######### LWB############################################
import os
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import re
import argparse
from scipy.interpolate import interp2d
pd.options.mode.chained_assignment = None # default='warn'
def main():
#0. Helper Functions
    # Generate a list of filenames in a directory path
    def list_filenames(path_input, prefix='', suffix=''):
        # if the input is a WindowsPath, convert it to a string
if not isinstance(path_input,str):
path_input=path_input.__str__()
print(path_input)
file_paths = sorted(
[
os.path.join(path_input, fname)
for fname in os.listdir(path_input)
if fname.startswith(prefix) and fname.endswith(suffix)
])
return(file_paths)
#read summarized panel output file and write a dataframe
def read_concentration_tbl(fn, colnames):
df = pd.read_csv(fn, header=0, names=colnames, skiprows=2, sep='\s+')
#make key
list_str = fn.split('/')[-1][:-4].split('_')
key = list_str[3:]
tmp = '_'
key = tmp.join(key)
df['fn']=key
return(df)
#read summarized dbr output file and write a dataframe
def read_nut_tbl(fn, colnames):
df = pd.read_csv(fn, header=0, names=colnames, skiprows=2, sep='\s+')
analysis = fn.split('/')[-1][:-4].split('_')[2]
event = fn.split('/')[-1][:-4].split('_')[3:]
dbr_str = '_'
event = dbr_str.join(event)
tevent = fn.split('/')[-1][:-4].split('_')[-1][1:]
df['Analysis']=analysis
df['Intrusion_Event']=event
df['Event_Time'] = int(tevent)
return(df)
def read_st2d_tbl(fn, colnames):
df = pd.read_csv(fn, header=0, names=colnames, skiprows=2, sep='\s+')
analysis = fn.split('/')[-1][:-4].split('_')[2]
mining = fn.split('/')[-1][:-4].split('_')[-1]
rep = fn.split('/')[-1][:-4].split('_')[-2]
df['Analysis']=analysis
df['Mining']=mining
df['Replicate']=rep
return(df)
def plot_count(ax_in, df_in):
ylim=ax_in.get_ylim()
xlim=ax_in.get_xlim()
ypos=ylim[0]+(ylim[1]-ylim[0])*.9
xpos=xlim[0]+(xlim[1]-xlim[0])*.5
ax_in.text(xpos, ypos, 'N = {}'.format(df_in.shape[1]), fontsize=16, ha='center')
return(ax_in)
def plot_annotation(ax_in, text_str):
ylim=ax_in.get_ylim()
xlim=ax_in.get_xlim()
ypos=ylim[0]+(ylim[1]-ylim[0])*.75
xpos=xlim[0]+(xlim[1]-xlim[0])*.5
ax_in.text(xpos, ypos, text_str, fontsize=16, ha='center', color='red')
return(ax_in)
#I. Input Arguments and Files
#Note: these options enable filtering incase there are other file types or analyses in the directory
parser = argparse.ArgumentParser()
parser.add_argument('--dir_NUTS', type=str, default='NUTS', help='Directory with Nuts release files')
parser.add_argument('--dir_PANEL', type=str, default='PANEL', help='Directory with static mole fraction files')
parser.add_argument('--dir_SECOT2D', type=str, default='SECOT2D', help='Directory with transport model files')
parser.add_argument('--nuts_pre', type=str, default='sum_nut_CRA19_', help='filter: files starts with string')
parser.add_argument('--panel_pre', type=str, default='sum_panel_st_CRA19_', help='filter: files starts with string')
parser.add_argument('--st2d_pre', type=str, default='sum_st2d_PABC', help='filter: files starts with string')
parser.add_argument('--nuts_ext', type=str, default='.tbl', help='file should be .tbl format')
parser.add_argument('--panel_ext', type=str, default='.tbl', help='file should be .tbl format')
parser.add_argument('--st2d_ext', type=str, default='.tbl', help='file should be .tbl format')
# Outputs
parser.add_argument('--fn_output',type=str, default='LWB_Stats.csv', help='file name of .csv file with vector mean, medians')
#Options
parser.add_argument('--reps', nargs='+', type=str, default = ['r1', 'r2', 'r3'], help='replicates calculation')
parser.add_argument('--scen', type=str, default='s2', help='expecting one scenario like s1 or s2')
parser.add_argument('--mining', type=str, default='mf', help='mining scenario either mp or mf')
parser.add_argument('--intrusion_time', type=int, default= 100, help='intrusion time for nuts release model')
# Spacer
opt = parser.parse_args()
print(opt)
#Nuts release data in units of [ci]
nuts_folder = opt.dir_NUTS
assert os.path.exists(nuts_folder)
print('Success Directory = {}\n'.format(nuts_folder) )
    # Panel output: static mole fractions, expressed as decimal fractions
panel_folder = opt.dir_PANEL
assert os.path.exists(panel_folder)
print('Success Found Directory = {}\n'.format(panel_folder) )
#SECOT2D output in unit of fraction of 1kg
st2d_folder = opt.dir_SECOT2D
assert os.path.exists(st2d_folder)
print('Success Found Directory = {}\n'.format(st2d_folder) )
#list of filenames directories
fn_nut = list_filenames(nuts_folder, prefix=opt.nuts_pre,
suffix=opt.nuts_ext)
fn_stfr = list_filenames(panel_folder, prefix=opt.panel_pre,
suffix=opt.panel_ext)
fn_st2d = list_filenames(st2d_folder, prefix=opt.st2d_pre,
suffix=opt.st2d_ext)
#II. dataframe of static mole fractions that don't transport in Culebra from Panel
colnames = ['vector','time',
'AMFRCMIC','AMFRCINT','AMFRCMIN',
'PUFRCMIC','PUFRCINT','PUFRCMIN',
'UFRCMIC','UFRCINT','UFRCMIN',
'THFRCMIC','THFRCINT','THFRCMIN']
df_stfr = read_concentration_tbl(fn=fn_stfr[0], colnames=colnames)
for fn in fn_stfr[1:]:
df_stfr = pd.concat([df_stfr,read_concentration_tbl(fn=fn,colnames=colnames) ])
#Create Vector Names r1v1 etc
df_stfr['Replicate']=df_stfr['fn'].apply(lambda x: x.split('_')[-2])
df_stfr['Scenario']=df_stfr['fn'].apply(lambda x: x.split('_')[-1])
df_stfr['VectorName']=df_stfr.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
#filter df to scenario and replicates from input args
df_stfr=df_stfr[df_stfr['Scenario'].isin([opt.scen])]
df_stfr=df_stfr[df_stfr['Replicate'].isin(opt.reps)]
    # Static mole fraction of each lumped nuclide that is not transportable in the Culebra
df_stfr['AM241_SML']=df_stfr[['AMFRCMIC','AMFRCINT','AMFRCMIN']].sum(axis=1)
df_stfr['PU239_SML']=df_stfr[['PUFRCMIC','PUFRCINT','PUFRCMIN']].sum(axis=1)
df_stfr['U234_SML']=df_stfr[['UFRCMIC','UFRCINT','UFRCMIN']].sum(axis=1)
df_stfr['TH230_SML']=df_stfr[['THFRCMIC','THFRCINT','THFRCMIN']].sum(axis=1)
#Static Mole Fraction of Lumped Radionuclide not transportable in Culebra
list_sml=['AM241_SML','PU239_SML','U234_SML','TH230_SML']
#Dataframe with fraction transportable in Culebra
dict_Mobile={}
for col in list_sml:
df_stfr[col]=df_stfr[col].apply(lambda x: 1-x)
key = col
dict_Mobile[key] = {vector:val for vector, val in zip(df_stfr['VectorName'].values,df_stfr[col].values)}
#III. datafame of releases in curries to Culebra from Nuts
colnames = ['vector','time','A00AM241','A00PU239','A00U234','A00TH230','EPALWMBT']
df_nut = read_nut_tbl(fn=fn_nut[0],colnames=colnames)
for fn in fn_nut[1:]:
df_nut = pd.concat([df_nut, read_nut_tbl(fn=fn, colnames=colnames)])
#Create Vector Names r1v1 etc
df_nut['Replicate']=df_nut['Intrusion_Event'].apply(lambda x: x.split('_')[0])
df_nut['Scenario']=df_nut['Intrusion_Event'].apply(lambda x: x.split('_')[1])
df_nut['VectorName']=df_nut.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
#filter df to scenario and replicates from input args
df_nut=df_nut[df_nut['Scenario'].isin([opt.scen])]
df_nut=df_nut[df_nut['Replicate'].isin(opt.reps)]
df_nut=df_nut[df_nut['Event_Time'].isin([opt.intrusion_time])]
    # Sometimes the NUTS output has values near 1E-100: the third exponent digit
    # crowds out the "E", so values appear as 1.123456-100 instead of 1.123456E-100.
    # Values that do not match the regex below are overwritten with 0.
reg_exp = '[+\-]?[^\w]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)'
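    # e.g. '1.123456E-100' matches the pattern, while the truncated
    # '1.123456-100' does not and is therefore replaced with 0 below.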
list_filt = ['A00AM241','A00PU239','A00U234','A00TH230']
for col in list_filt:
df_nut[col] = df_nut[col].apply(lambda x: x if re.match(reg_exp, str(x)) else 0)
df_nut[col] = df_nut[col].apply(lambda x: float(x))
# Verify common set of vectors to match panel and nuts models
assert set(df_nut['VectorName']) == set(df_stfr['VectorName'])
# Scale to transportable fraction
list_trans = ['AM241_Trans', 'PU239_Trans','U234_Trans','TH230_Trans']
def scale(fraction,total):
return (fraction*total)
for nut, mob, trans in zip(list_filt,list_sml, list_trans):
lookup=dict_Mobile[mob]
df_nut[trans]=df_nut.apply(lambda x: lookup[x.VectorName]*x[nut],axis=1)
# fractional release from Culebra
colnames = ['vector','time','MT2AM241','MT2PU239','MT2U234','MT2TH230','MT2TH23A']
df_st2d = read_st2d_tbl(fn=fn_st2d[0],colnames=colnames)
for fn in fn_st2d[1:]:
df_st2d = pd.concat([df_st2d, read_st2d_tbl(fn=fn, colnames=colnames)])
df_st2d['VectorName']=df_st2d.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
# Verify common set of vectors to match panel, nuts, secot2d models
assert set(df_nut['VectorName']) == set(df_st2d['VectorName'])
#IV. Mass transport from Secot2d
list_mt=['MT2AM241','MT2PU239','MT2U234','MT2TH230','MT2TH23A']
    # Same fix as above: exponents near 1E-100 lose the "E" in the output
    # (1.123456-100 instead of 1.123456E-100); non-matching values are set to 0.
reg_exp = '[+\-]?[^\w]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)'
for col in list_mt:
df_st2d[col] = df_st2d[col].apply(lambda x: x if re.match(reg_exp, str(x)) else 0)
df_st2d[col] = df_st2d[col].apply(lambda x: float(x))
df_st2d['MT2TH_Tot']=df_st2d.apply(lambda x: x.MT2TH230 + x.MT2TH23A, axis=1)
#V. Flag negative Culebra mass transport flows, replace with zero, and output 50yr flow steps
dict_st2d_dQ = {}
st2d_keys=['MT2AM241','MT2PU239','MT2U234','MT2TH_Tot']
for key in st2d_keys:
table_temp = df_st2d.pivot_table(index=['time'], columns=['VectorName'],values=[key])
table_temp.columns = [col[1] for col in table_temp.columns]
table_temp2 = table_temp.copy()
dict_table = {}
#Convert from cumulative output to incremental output
for vector in table_temp2.columns:
v0 = table_temp2[vector].values[1:]
v1 = table_temp2[vector].values[:-1]
dv = v0 - v1
dv = np.where(dv>=0, dv, 0)
dv = np.insert(dv,0,v1[0],axis=0)
dict_table[vector]=dv
dict_st2d_dQ[key]=pd.DataFrame(dict_table).set_index(table_temp.index)
#VI. Flag negative nuts flows and replace with zero, output 50 yr flow steps
dict_Nuts_dQ={}
    nut_keys = ['A00AM241', 'A00PU239', 'A00U234', 'A00TH230']
for key in nut_keys:
table_temp = df_nut.pivot_table(index=['time'], columns=['VectorName'],values=[key])
table_temp.columns = [col[1] for col in table_temp.columns]
table_temp2 = table_temp.copy()
dict_table = {}
#Convert from cumulative to incremental output
for vector in table_temp2.columns:
v0 = table_temp2[vector].values[1:]
v1 = table_temp2[vector].values[:-1]
dv = v0 - v1
dv = np.insert(dv,0,0,axis=0)
dv = np.where(dv>0, dv, 0)
dv = list(dv)
dict_table[vector]=dv
dict_Nuts_dQ[key] = pd.DataFrame(dict_table).set_index(table_temp.index)
    # VII. Convolve releases into the Culebra with transport out of the Culebra to get radionuclide releases at the LWB
lwb_keys = ['AM241','PU239','U234','TH230A']
dict_lwb = {}
for nut, st2d, lwb in zip(nut_keys, st2d_keys, lwb_keys):
dict_temp = {}
for vector in dict_Nuts_dQ[nut].columns:
qnuts = dict_Nuts_dQ[nut][vector].values
qst2d = dict_st2d_dQ[st2d][vector].values
######## convolution step here #############
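            # qnuts holds the 50-yr incremental releases to the Culebra (Ci per step)
            # and qst2d the incremental unit-mass (1 kg) breakthrough at the LWB;
            # their convolution, truncated to the original time grid, approximates
            # the activity reaching the LWB in each 50-yr step.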
qconv = np.convolve(qnuts,qst2d)[0:len(qnuts)]
dict_temp[vector]=qconv
dict_lwb[lwb]=pd.DataFrame(dict_temp).set_index(dict_Nuts_dQ[nut].index)
#VIII. Aggregate Stats and write ouput
df_lwb_lumped = dict_lwb[lwb_keys[0]]*0
for lwb in lwb_keys:
df_lwb_lumped = df_lwb_lumped + dict_lwb[lwb]
time_max_mean = df_lwb_lumped.mean(axis=1).idxmax()
list_mean=[]
list_median=[]
list_count=[]
for lwb in lwb_keys:
list_mean.append(dict_lwb[lwb].mean(axis=1)[time_max_mean])
list_median.append(dict_lwb[lwb].median(axis=1)[time_max_mean])
list_count.append(dict_lwb[lwb].shape[1])
df_out = pd.DataFrame({'Mean [Ci]':list_mean, 'Median [Ci]':list_median,'Count':list_count,
'Period End':time_max_mean,'Duration':'50 Yrs'}, index=lwb_keys)
df_out.to_csv(opt.fn_output)
#IX. Generate Plot
for nut, st2d, lwb in zip(nut_keys, st2d_keys, lwb_keys):
df_Convolved = dict_lwb[lwb]
df_NutsdQ = dict_Nuts_dQ[nut]
df_st2d = dict_st2d_dQ[st2d]
fig, ax = plt.subplots(ncols=3,nrows=1, figsize=(12,4), dpi=125)
ax[0].set_title('{} Release to Culebra'.format(nut))
ax[1].set_title('{} Release to LWB'.format(st2d))
ax[2].set_title('{} Release to LWB'.format(lwb))
for vector in df_NutsdQ.columns:
horsetail = df_NutsdQ[vector]
ax[0].plot(horsetail.index, horsetail, color='k', alpha=.5)
ax[0].set_ylabel('50 Yr Time Step Release to Culebra [Ci]')
ax[0].set_xlabel('Time Post Closure [Yr]')
plt.setp(ax[0].get_xticklabels(),rotation=90)
plot_count(ax_in=ax[0], df_in=df_Convolved)
for vector in df_st2d.columns:
horsetail = df_st2d[vector]
ax[1].plot(horsetail.index, horsetail, color='k', alpha=.5)
ax[1].set_ylabel('50 Yr Time Step Fraction of Unit Kg')
ax[1].set_xlabel('Time Post Closure [Yr]')
plt.setp(ax[1].get_xticklabels(),rotation=90)
plot_count(ax_in=ax[1], df_in=df_Convolved)
vector_mean = df_Convolved.mean(axis=1)
for vector in df_Convolved.columns:
horsetail = df_Convolved[vector]
ax[2].plot(horsetail.index, horsetail, color='k', alpha=.5)
ax[2].plot(vector_mean, color='r')
ax[2].set_ylabel('50 Yr Time Step Release to LWB [Ci]')
ax[2].set_xlabel('Time Post Closure [Yr]')
plt.setp(ax[2].get_xticklabels(),rotation=90)
plot_count(ax_in=ax[2], df_in=df_Convolved)
plot_annotation(ax_in=ax[2], text_str='Mean = Red')
fig.tight_layout()
plt.savefig('ConvolutionPlot{}'.format(lwb))
if __name__ == '__main__':
main()
# %%
import numpy as np
from scipy import interpolate
from math import *
import matplotlib.pyplot as plt
### Make polar grid ###
rvec = np.arange(1.0, 11.0, 1.0)
tvec = np.arange(pi/10.0, pi, pi/10.0)
Nr = len(rvec)
Nt = len(tvec)
X = np.empty([Nr,Nt])
Y = np.empty([Nr,Nt])
Z = np.empty([Nr,Nt])
for i in range(Nr):
for j in range(Nt):
r = rvec[i]
t = tvec[j]
X[i,j] = r*sin(t)
Y[i,j] = r*cos(t)
Z[i,j] = cos(t)/pow(r,3) # cos(theta)/r^3: Br of dipole
### Do the interpolation ###
interp_poly = interpolate.interp2d(X,Y,Z, kind='linear')
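# NOTE: scipy.interpolate.interp2d is deprecated in recent SciPy releases and
# removed in SciPy >= 1.14; RegularGridInterpolator (or bisplrep/bisplev, as in
# the commented-out line below) is the usual replacement for new code.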
#tck = interpolate.bisplrep(X,Y,Z, kx=3, ky=3)
### interpolate onto new grid ###
rnew = np.arange(1.0, 11.1, 0.1)
tnew=np.array([pi/10, pi/5, pi/4, pi/3, pi/2, .75*pi, pi])
#tnew = np.arange(pi/100.0, pi, pi/100.0)
Nr2 = len(rnew)
Nt2 = len(tnew)
X2 = np.empty([Nr2,Nt2])
Y2 = np.empty([Nr2,Nt2])
Z2 = np.empty([Nr2,Nt2])
for i in range(Nr2):
for j in range(Nt2):
r = rnew[i]
t = tnew[j]
X2[i,j] = r*sin(t)
Y2[i,j] = r*cos(t)
Z2[i,j] = interp_poly(X2[i,j], Y2[i,j])
#Z2[i,j] = interpolate.bisplev(X2[i,j], Y2[i,j], tck)
### Pseudocolour plot ###
fig = plt.figure()
fig.add_subplot(111, aspect='equal')
plt.pcolor(X2,Y2,Z2)
plt.plot(X,Y,marker='+')
plt.plot(X2,Y2,marker='.')
plt.colorbar()
plt.show()
# %%
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 12:59:29 2020
@author: annaklara
"""
import re
import numpy as np
import pandas as pd
import math
# %% Part A
import re
def names():
simple_string = """Amy is 5 years old, and her sister Mary is 2 years old.
Ruth and Peter, their parents, have 3 kids."""
pattern = '[A-Z][a-z]*'
result = re.findall(pattern, simple_string)
return(result)
raise NotImplementedError()
assert len(names()) == 4, "There are four names in the simple_string"
# %% part b
def grades():
with open ("/Users/annaklara/Downloads/grades.txt", "r") as file:
grades = file.read()
#my code
gradeslist = grades.splitlines()
patternGrade = '[B]$|[B][\s]$'
patternName = '[\S\s]+(?=[:])'
bstudents = []
for n in gradeslist:
if re.findall(patternGrade, n):
name = re.findall(patternName, n)[0]
bstudents.append(name)
return(bstudents)
# YOUR CODE HERE
raise NotImplementedError()
assert len(grades() ) == 16
print(grades())
# %% part c
def logs():
loglist = []
with open("assets/logdata.txt", "r") as file:
logdata = file.read()
loglines = logdata.splitlines()
patternHost = '[\d]+[\.][\d]+[\.][\d]+[.][\d]+'
patternUser = '(?<=[-][\s])[\S]+'
patternTime = '(?<=[\[])[\S]+[\s][\S]+(?=[\]])'
patternRequest = '(?<=["])[\S\s]+(?=["])'
for l in loglines:
aH = re.findall(patternHost, l)[0]
aU = re.findall(patternUser, l)[0]
aT = re.findall(patternTime, l)[0]
aR = re.findall(patternRequest, l)[0]
logDict = { 'host':aH, 'user_name':aU, 'time':aT, 'request':aR }
loglist.append(dict(logDict))
return(loglist)
# YOUR CODE HERE
raise NotImplementedError()
assert len(logs()) == 979
one_item={'host': '192.168.127.12',
'user_name': 'feest6811',
'time': '21/Jun/2019:15:45:24 -0700',
'request': 'POST /incentivize HTTP/1.1'}
assert one_item in logs(), "Sorry, this item should be in the log results, check your formating"
# %% quiz 1 workspace
# question 1
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj1 = pd.Series(sdata)
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj2 = pd.Series(sdata, index=states)
obj3 = pd.isnull(obj2)
x = obj2['California']
obj2['California'] != x #nan not a value can't compare
obj3['California']
obj2['California'] == None
math.isnan(obj2['California'])
# %%Question 2
d = {
'1': 'Alice',
'2': 'Bob',
'3': 'Rita',
'4': 'Molly',
'5': 'Ryan'
}
S = pd.Series(d)
check = S.iloc[0:3]
# %% question 3 recast column headers to capitals
ss = pd.Series( ['a', 'b', 'c'] , name = 'vals')
ss.to_frame() # problem, this was still a series
ss = pd.DataFrame({"aaaa": [1, 2, 3], "bbbb": [4, 5, 6]})
ss.rename(mapper = lambda x: x.upper(), axis = 1, inplace = True)
ss.rename(mapper = lambda x: x.upper(), axis =1)
ss = ss.rename(mapper = lambda x: x.upper(), axis = 1)
ss = ss.rename(mapper = lambda x: x.upper(), axis = 'columns')
# question 4
df = pd.DataFrame({"gre score": [1, 2, 3, 3], "toefl score": [4, 105, 107, 1066]})
df.where(df['toefl score'] > 105)
df[df['toefl score'] > 105]
df.where(df['toefl score'] > 105).dropna()
# question 6
ss = pd.DataFrame({'state': ['Ohio', 'Color', 'Utah', 'ny'], 'one': [0, 4, 8, 12], 'two': [1, 5, 9, 13]})
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
import json
import re
from datetime import datetime
import numpy as np
comm = re.compile("<!--|-->")
class Team: #change team player object
def __init__(self, team, year, player=None):
self.year = year
self.team = team
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}.html".format(self.team, self.year)).text
self.soup = BeautifulSoup(re.sub("<!--|-->","",self.team_stat),"html.parser")
def team_sum(self, four_factor = False):
summary_container = self.soup.find("table",id="team_misc")
summary_table = summary_container.find("tbody")
team_sum_row = summary_table.find_all("tr")
dict_league_rank = {row['data-stat']:row.get_text() for row in team_sum_row[1]}
dict_team_sum = {row['data-stat']:row.get_text() for row in team_sum_row[0]}
del dict_team_sum['player'], dict_league_rank['player']
df_team = pd.DataFrame(data = [dict_team_sum, dict_league_rank],index = ['TEAM','LEAGUE']).T
for column in df_team.columns:
try:
df_team[column] = pd.to_numeric(df_team[column])
except:
pass
if four_factor:
off_stats = df_team.loc[['tov_pct',
'pace', 'orb_pct', 'efg_pct', 'ft_rate']]
off_stats.columns = ['Team','OFF']
# off_stats['Team'] = off_stats['Team'].apply(lambda x: float(x))
def_stats = df_team.loc[['opp_tov_pct',
'pace', 'drb_pct', 'opp_efg_pct', 'opp_ft_rate']]
def_stats.columns = ['Team','DEF']
# def_stats['Team'] = def_stats['Team'].apply(lambda x: float(x))
return off_stats, def_stats
return df_team
def roster(self, player = None):
roster_containter = self.soup.find("tbody")
roster_vals = roster_containter.find_all('tr')
data_list = []
for row in range(len(roster_vals)):
table_data = roster_vals[row].find_all("td")
data_list.append({table_data[data_row]['data-stat']
:table_data[data_row].get_text() for data_row in range(len(table_data))})
df_roster = pd.DataFrame(data=data_list)
if player:
return df_roster[df_roster['player'].str.contains(player)].T
return df_roster
def injury_report(self,roster_update=False):
injury_table = self.soup.find("table",id="injury")
inj_body = injury_table.find("tbody")
inj_data = inj_body.find_all("tr")
df_injury = pd.DataFrame({
"player": [inj_data[data].find("th").get_text()
for data in range(len(inj_data))],
"team": [inj_data[data].find_all("td")[0].get_text() for data in range(len(inj_data))],
"date": [inj_data[data].find_all("td")[1].get_text() for data in range(len(inj_data))],
"description": [inj_data[data].find_all("td")[2].get_text() for data in range(len(inj_data))]
})
if roster_update == True:
updated = df_injury['description'].apply(lambda x: 0 if 'OUT' in x.upper().split(' ') else 1)
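            # e.g. (illustrative) "Out (knee)" -> 0 (unavailable), "Day To Day" -> 1 (available)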
df_injury.description = updated
return df_injury
return df_injury
def per_game(self,player = None):
per_game_table = self.soup.find("table", id="per_game")
table_body = per_game_table.find("tbody")
table_row = table_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']
:table_data[data_row].get_text() for data_row in range(len(table_data))})
df_per_game = pd.DataFrame(data=data_row)
for column in df_per_game.columns:
try:
df_per_game[column] = pd.to_numeric(df_per_game[column])
except:
pass
if player:
return df_per_game[df_per_game['player'].str.contains(player)].T
return df_per_game
def totals(self, player = None):
totals_table = self.soup.find("table", id="totals")
totals_body = totals_table.find("tbody")
table_row = totals_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_totals = pd.DataFrame(data=data_row)
for column in df_totals.columns:
try:
df_totals[column] = | pd.to_numeric(df_totals[column]) | pandas.to_numeric |
import streamlit as st
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
import ast
import base64
def local_css(file_name):
with open(file_name) as f:
st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
local_css("style.css")
st.write("""
# Steam Community Market - Advanced Price Helper App
""")
scm_url = st.text_input('Please enter the url address of Steam Community Market Listing', 'https://steamcommunity.com/market/listings/440/The%20Killing%20Tree')
# Scraping and Storing Objects
# Input from user will need to be a url for steam community market
#resp_object = requests.get('https://steamcommunity.com/market/listings/440/The%20Killing%20Tree') #url will be an input from user
resp_object = requests.get(scm_url) #url will be an input from user
soup = BeautifulSoup(resp_object.text,'html.parser')
market_listing_largeimage_url = soup.find("div",{"class":"market_listing_largeimage"}).contents[1]['src'] # item image url
price_history_string = ast.literal_eval(re.findall('(?<=line1=)(.+?\]\])',resp_object.text)[0]) # price history string
item_name = re.findall('(?<=<title>Steam Community Market :: Listings for )(.+?(?=<\/))',resp_object.text)[0] # name of item
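# price_history_string holds [timestamp, median_price, quantity] triples,
# e.g. (illustrative, not from a real listing): ["Jun 01 2021 01: +0", 0.05, "317"]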
# constructing a df with entire price history
times = []
prices = []
solds = []
for row in range(len(price_history_string)):
timestamp = price_history_string[row][0]
median_price_sold = price_history_string[row][1]
number_sold = price_history_string[row][2]
times.append(timestamp)
prices.append(median_price_sold)
solds.append(number_sold)
final_df = pd.DataFrame(list(zip(times,prices,solds)),columns=['timestamp','price_median (USD)','quantity_sold']) # constructing a dataframe with all attributes
final_df['timestamp'] = [x[:14] for x in final_df['timestamp']] # removing +0s
final_df['timestamp'] = pd.to_datetime(final_df['timestamp'],format='%b %d %Y %H').dt.tz_localize('UTC', ambiguous=True) # convert to datetime
final_df['item_name'] = item_name
final_df = final_df[[final_df.columns[-1]] + list(final_df.columns[:len(final_df.columns)-1])]
final_df['quantity_sold'] = | pd.to_numeric(final_df['quantity_sold']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
import pandas as pd
import plotly.graph_objs as go
import requests
from base64 import b64encode as be
from dash_html_components import Th, Tr, Td, A
from datetime import datetime, timedelta
from flask import request
from folium import Map
from operator import itemgetter
from os.path import join, dirname, realpath
from random import randint
from requests.auth import HTTPBasicAuth
from .maputils import create_dcircle_marker, create_tcircle_marker
from .utils import (
api_request_to_json,
json_to_dataframe,
starttime_str_to_seconds,
)
TMP = join(dirname(realpath(__file__)), '../tmp/')
LCL = join(dirname(realpath(__file__)), '../images/')
def get_rsam(ch, st):
j = api_request_to_json(f'rsam?channel={ch}&starttime={st}')
data = []
d = pd.DataFrame(j['records'][ch])
if not d.empty:
d.set_index('date', inplace=True)
data = [go.Scatter(
x=d.index,
y=d.rsam,
mode='markers',
marker=dict(size=4)
)]
return {
'data': data,
'layout': {
'margin': {
't': 30
},
'xaxis': {
'range': [d.index.min(), d.index.max()]
},
'yaxis': {
'range': [d.rsam.min() - 20, 2 * d.rsam.mean()]
}
}
}
def get_tilt(ch, st):
j = api_request_to_json(f'tilt?channel={ch}&starttime={st}')
d = pd.DataFrame(j['records'][ch])
traces = []
if not d.empty:
d.set_index('date', inplace=True)
traces.append({
'x': d.index,
'y': d['radial'],
'name': f"radial {j['used_azimuth']:.1f}"
})
traces.append({
'x': d.index,
'y': d['tangential'],
'name': f"tangential {j['tangential_azimuth']:.1f}"
})
return {
'data': traces,
'layout': {
'margin': {
't': 30
}
}
}
def get_rtnet(ch, st):
j = api_request_to_json(f'rtnet?channel={ch}&starttime={st}')
d = pd.DataFrame(j['records'][ch])
traces = []
if not d.empty:
d.set_index('date', inplace=True)
traces.append({
'x': d.index,
'y': d.east,
'name': 'East',
'mode': 'markers',
'marker': dict(
size=4
)
})
traces.append({
'x': d.index,
'y': d.north,
'name': 'North',
'mode': 'markers',
'marker': dict(
size=4
)
})
traces.append({
'x': d.index,
'y': d.up,
'name': 'Up',
'mode': 'markers',
'marker': dict(
size=4
)
})
return {
'data': traces,
'layout': {
'margin': {
't': 30
}
}
}
def get_and_store_hypos(geo, st, current_data):
if is_data_needed(st, current_data):
return get_hypos(geo, st).to_json()
else:
return current_data
def is_data_needed(st, data):
if not data:
return True
now = datetime.now()
olddata = pd.read_json(data)
mindate = olddata.date.min()
maxdate = olddata.date.max()
td = now - mindate
# Requested more than is currently stored?
seconds = starttime_str_to_seconds(st)
if seconds > (td.days * 86400 + td.seconds):
return True
# Data is old
td = now - maxdate
if (td.seconds / 60) > 10:
return True
return False
def get_hypos(geo, st):
j = api_request_to_json(f'hypocenter?geo={geo}&starttime={st}')
d = | pd.DataFrame(j['records']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy import stats as sps
from . import normalizers as norm
from . import weigtings as weight
class DataMatrix:
""" Load and Prepare data matrix """
def __init__(self, path, delimiter=",", idx_col=0):
self.data = | pd.read_csv(path, delimiter=delimiter, index_col=idx_col) | pandas.read_csv |
import datetime as dt
import unittest
from typing import Any, Callable, Dict, Union, cast
from unittest.mock import MagicMock, patch
import lmfit
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose
from pandas.testing import assert_frame_equal
from darkgreybox.base_model import DarkGreyModel, DarkGreyModelResult
from darkgreybox.models import Ti
from darkgreybox.predict import map_ic_params, predict_model, predict_models
EMPTY_MODEL_RESULT = DarkGreyModelResult(np.zeros(1), {}, lmfit.Parameters(), {})
TEST_START = dt.datetime(2021, 1, 1, 7, 0)
TEST_END = dt.datetime(2021, 1, 1, 8, 0)
X_test = pd.DataFrame(
index=pd.date_range(TEST_START, TEST_END, freq='1H'),
data={
'Ta': [10, 10],
'Ph': [10, 0],
'Ti0': [10, 20]
})
y_test = | pd.Series([10, 20]) | pandas.Series |
"""Transformation of the FERC Form 714 data."""
import logging
import pathlib
import re
import geopandas
import numpy as np
import pandas as pd
import pudl
import pudl.constants as pc
logger = logging.getLogger(__name__)
##############################################################################
# Constants required for transforming FERC 714
##############################################################################
# More detailed fixes on a per respondent basis
OFFSET_CODE_FIXES = {
102: {"CPT": "CST"},
110: {"CPT": "EST"},
115: {"MS": "MST"},
118: {
"CS": "CST",
"CD": "CDT",
},
120: {
"CTR": "CST",
"CSR": "CST",
"CPT": "CST",
"DST": "CST",
"XXX": "CST",
},
133: {
"AKS": "AKST",
"AST": "AKST",
"AKD": "AKDT",
"ADT": "AKDT",
},
134: {"XXX": "EST"},
137: {"XXX": "CST"},
140: {
"1": "EST",
"2": "EDT",
"XXX": "EST",
},
141: {"XXX": "CST"},
143: {"MS": "MST"},
146: {"DST": "EST"},
148: {"XXX": "CST"},
151: {
"DST": "CDT",
"XXX": "CST",
},
153: {"XXX": "MST"},
154: {"XXX": "MST"},
156: {"XXX": "CST"},
157: {"DST": "EDT"},
161: {"CPT": "CST"},
163: {"CPT": "CST"},
164: {"XXX": "CST"},
165: {"CS": "CST"}, # Uniform across the year.
173: {
"CPT": "CST",
"XXX": "CST",
},
174: {
"CS": "CDT", # Only shows up in summer! Seems backwards.
"CD": "CST", # Only shows up in winter! Seems backwards.
"433": "CDT",
},
176: {
"E": "EST",
"XXX": "EST",
},
186: {"EAS": "EST"},
189: {"CPT": "CST"},
190: {"CPT": "CST"},
193: {
"CS": "CST",
"CD": "CDT",
},
194: {"PPT": "PST"}, # LADWP, constant across all years.
195: {"CPT": "CST"},
208: {"XXX": "CST"},
211: {
"206": "EST",
"DST": "EDT",
"XXX": "EST",
},
213: {"CDS": "CDT"},
216: {"XXX": "CDT"},
217: {
"MPP": "MST",
"MPT": "MST",
},
224: {"DST": "EST"},
225: {
"EDS": "EDT",
"DST": "EDT",
"EPT": "EST",
},
226: {"DST": "CDT"},
230: {"EPT": "EST"},
233: {"DST": "EDT"},
234: {
"1": "EST",
"2": "EDT",
"DST": "EDT",
},
# Constant across the year. Never another timezone seen.
239: {"PPT": "PST"},
243: {"DST": "PST"},
245: {"CDS": "CDT"},
248: {"DST": "EDT"},
253: {"CPT": "CST"},
254: {"DST": "CDT"},
257: {"CPT": "CST"},
259: {"DST": "CDT"},
264: {"CDS": "CDT"},
271: {"EDS": "EDT"},
275: {"CPT": "CST"},
277: {
"CPT": "CST",
"XXX": "CST",
},
281: {"CEN": "CST"},
288: {"XXX": "EST"},
293: {"XXX": "MST"},
294: {"XXX": "EST"},
296: {"CPT": "CST"},
297: {"CPT": "CST"},
298: {"CPT": "CST"},
299: {"CPT": "CST"},
307: {"PPT": "PST"}, # Pacificorp, constant across the whole year.
308: {
"DST": "EDT",
"EDS": "EDT",
"EPT": "EST",
},
328: {
"EPT": "EST",
},
}
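# Minimal sketch of how these per-respondent fixes could be applied (assumption: the actual
# application happens in the transform functions elsewhere, not in this constants block):
#   for rid, fixes in OFFSET_CODE_FIXES.items():
#       mask = df["utility_id_ferc714"] == rid
#       df.loc[mask, "utc_offset_code"] = df.loc[mask, "utc_offset_code"].replace(fixes)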
OFFSET_CODE_FIXES_BY_YEAR = [
{
"utility_id_ferc714": 139,
"report_year": 2006,
"utc_offset_code": "PST"
},
{
"utility_id_ferc714": 235,
"report_year": 2015,
"utc_offset_code": "MST"
},
{
"utility_id_ferc714": 289,
"report_year": 2011,
"utc_offset_code": "CST"
},
{
"utility_id_ferc714": 292,
"report_year": 2011,
"utc_offset_code": "CST"
},
]
BAD_RESPONDENTS = [
319,
99991,
99992,
99993,
99994,
99995,
]
"""Fake respondent IDs for database test entities."""
OFFSET_CODES = {
"EST": pd.Timedelta(-5, unit="hours"), # Eastern Standard
"EDT": pd.Timedelta(-5, unit="hours"), # Eastern Daylight
"CST": pd.Timedelta(-6, unit="hours"), # Central Standard
"CDT": pd.Timedelta(-6, unit="hours"), # Central Daylight
"MST": pd.Timedelta(-7, unit="hours"), # Mountain Standard
"MDT": pd.Timedelta(-7, unit="hours"), # Mountain Daylight
"PST": pd.Timedelta(-8, unit="hours"), # Pacific Standard
"PDT": pd.Timedelta(-8, unit="hours"), # Pacific Daylight
"AKST": pd.Timedelta(-9, unit="hours"), # Alaska Standard
"AKDT": pd.Timedelta(-9, unit="hours"), # Alaska Daylight
"HST": | pd.Timedelta(-10, unit="hours") | pandas.Timedelta |
from typing import List
import numpy as np
import pandas as pd
import scipy.io
from graphysio.plotwidgets.curves import CurveItem
def curves_to_matlab(
curves: List[CurveItem], filepath: str, index_label: str = 'timens'
) -> None:
sers = [c.series for c in curves]
data = | pd.concat(sers, axis=1) | pandas.concat |
import requests
import pandas as pd
import urllib.error
import os
''' This module provides a class to work with the downloaded GWAS Catalog
database. I had to write this because I couldn't access
(for whatever reason) the GWAS Catalog REST API documentation.
The class is initialized by downloading the GWAS Catalog
and then extracting the various fields to create a GwasCatalog
object. Getter methods are defined to obtain the required information. '''
url = 'https://www.ebi.ac.uk/gwas/api/search/downloads/alternative'
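# Example construction (assumption: the 'alternative' download is a TSV that pandas can read directly):
#   catalog_df = pd.read_csv(url, sep='\t', low_memory=False)
#   gwas = GwasCatalog(catalog_df)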
#catalog class
class GwasCatalog:
def __init__(self, catalog):
self.catalog = catalog
#prints the catalog
def showCatalog(self):
print(self.catalog)
    #shows catalog column names
def showAttributes(self):
for column in list(self.catalog.columns):
print(column)
#extracts single column from catalog as pandas Series object
def getColumn(self, index):
series = self.catalog[index]
return series
    #searches a column and returns all rows with matching values
def batchSearch(self, column_name, ids):
results = self.catalog[self.catalog[column_name].isin(ids)]
return results
#same as batchSearch() but returns only selected columns (features)
def batchRetrieve(self, column_name, ids, features):
df = self.catalog[self.catalog[column_name].isin(ids)]
series = [df[column_name]]
for feature in features:
series.append(df[feature])
print(len(series))
dataf = | pd.DataFrame(series) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 11:13:15 2019
@author: jkern
"""
from __future__ import division
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def hydro(sim_years):
#########################################################################
    # The purpose of this script is to use synthetic streamflows at major California
    # reservoir sites to simulate daily hydropower production for the PG&E and SCE
    # zones of the California electricity market (CAISO), using parameters optimized
# via a differential evolution algorithm.
#########################################################################
# load California storage reservoir (ORCA) sites
df_sites = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name = 'ORCA',header=0)
ORCA_sites = list(df_sites)
# load upper generation amounts for each predicted hydropower dam (PG&E and SCE)
upper_gen = pd.read_excel('CA_hydropower/upper.xlsx',header =0)
# month-day calender
calender = pd.read_excel('CA_hydropower/calender.xlsx',header=0)
# load simulated full natural flows at each California storage reservoir (ORCA site)
df_sim = pd.read_csv('Synthetic_streamflows/synthetic_streamflows_CA.csv',header=0)
df_sim = df_sim.loc[0:(sim_years+3)*365,:]
# load simulated outflows calculated by ORCA
df_ORCA = pd.read_csv('ORCA_output.csv')
outflow_sites = ['SHA_otf','ORO_otf','YRS_otf','FOL_otf','NML_otf','DNP_otf','EXC_otf','MIL_otf','ISB_otf','SUC_otf','KWH_otf','PFT_otf']
for i in range(0,len(df_ORCA)):
for s in outflow_sites:
df_sim.loc[i,s] = df_ORCA.loc[i,s]
sim_years = sim_years+3
#Add month and day columns to the dataframe
Month = []
Day = []
count = 0
for i in range(0,len(df_sim)):
if count < 365:
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
else:
count = 0
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
df_sim['Month']=Month
df_sim['Day']=Day
# calculate simulated totals
Sim_totals = []
for i in range(0,sim_years):
sample = df_sim.loc[i*365:i*365+365,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Sim_totals = np.append(Sim_totals,total)
# load historical full natural flows for 2001, 2005, 2010 and 2011
df_hist = pd.read_excel('CA_hydropower/hist_reservoir_inflows.xlsx',header=0)
Hist_totals = []
Hist_years = [2001,2005,2010,2011]
for i in Hist_years:
sample = df_hist[df_hist['year'] == i]
sample = sample.loc[:,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Hist_totals = np.append(Hist_totals,total)
# find most similar historical year for each simulated year
Rule_list=[]
for i in range(0,sim_years):
Difference=abs(Sim_totals[i]- Hist_totals)
#Select which rule to use
for n in range(0,len(Hist_years)):
if Difference[n]==np.min(Difference):
Rule=n
Rule_list.append(Rule)
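    # The inner loop is effectively an argmin, i.e. Rule = int(np.argmin(Difference)):
    # pick the historical year whose total flow is closest to the simulated year's total.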
# PGE hydro projects
PGE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='PGE',header=0)
PGE_dams = list(PGE_names.loc[:,'Balch 1':])
PGE_Storage=[PGE_dams[3],PGE_dams[7],PGE_dams[8],PGE_dams[9]]
PGE_No_Data_Dams=[PGE_dams[2],PGE_dams[4],PGE_dams[10],PGE_dams[11],PGE_dams[15],PGE_dams[16],PGE_dams[17],PGE_dams[26],PGE_dams[30],PGE_dams[38],PGE_dams[39],PGE_dams[55],PGE_dams[60],PGE_dams[65]]
## SCE hydro projects
SCE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='SCE',header=0)
SCE_dams = list(SCE_names.loc[:,'Big_Creek_1 ':])
SCE_No_Data_Dams=[SCE_dams[7],SCE_dams[8],SCE_dams[12]]
#Simulate all the PGE inflow dams
check_unused = []
PGE_name_list = []
SCE_name_list = []
f_horizon = 7
for name in PGE_dams:
STOR = np.zeros((365*(sim_years),1))
for year in range(0,sim_years):
GEN = np.zeros((365,7))
if name in PGE_No_Data_Dams:
pass
elif name in PGE_Storage:
# which operating rule to use?
Rule=Rule_list[year]
File_name='CA_hydropower/PGE_Storage_FNF_V2/1.0_FNF_Storage_Rule_' + str(name) +'.txt'
Temp_Rule=pd.read_csv(File_name,delimiter=' ',header=None)
peak_flow,starting,ending,refill_1_date,evac_date,peak_end,refill_2_date,storage,power_cap,eff,min_power=Temp_Rule.loc[Rule][:]
k = str(PGE_names.loc[0][name])
I_O=str(PGE_names.loc[1][name])
#Which site to use
if k =='Oroville' and I_O =='Inflows':
site_name=['ORO_fnf']
elif k =='Oroville' and I_O =='Outflows':
site_name=['ORO_otf']
elif k =='Pine Flat' and I_O =='Inflows':
site_name=['PFT_fnf']
elif k =='Pine Flat' and I_O =='Outflows':
site_name=['PFT_otf']
elif k =='Shasta' and I_O =='Inflows':
site_name=['SHA_fnf']
elif k =='Shasta' and I_O =='Outflows':
site_name=['SHA_otf']
elif k =='New Melones' and I_O =='Inflows':
site_name=['NML_fnf']
elif k =='New Melones' and I_O =='Outflows':
site_name=['NML_otf']
elif k =='Pardee' and I_O =='Inflows':
site_name=['PAR_fnf']
elif k =='Pardee' and I_O =='Outflows':
site_name=['PAR_otf']
elif k =='New Exchequer' and I_O =='Inflows':
site_name=['EXC_fnf']
elif k =='New Exchequer' and I_O =='Outflows':
site_name=['EXC_otf']
elif k =='Folsom' and I_O =='Inflows':
site_name=['FOL_fnf']
elif k =='Folsom' and I_O =='Outflows':
site_name=['FOL_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['DNP_fnf']
elif k =='<NAME>' and I_O =='Outflows':
site_name=['DNP_otf']
elif k =='Millerton' and I_O =='Inflows':
site_name=['MIL_fnf']
elif k =='Millerton' and I_O =='Outflows':
site_name=['MIL_otf']
elif k =='Isabella' and I_O =='Inflows':
site_name=['ISB_fnf']
elif k =='Isabella' and I_O =='Outflows':
site_name=['ISB_otf']
elif k =='Yuba' and I_O =='Inflows':
site_name=['YRS_fnf']
elif k =='Yuba' and I_O =='Outflows':
site_name=['YRS_otf']
else:
None
flow_ts = df_sim.loc[:,site_name].values
# iterate through every day of the year
for day in range(0,365):
for fd in range(0,f_horizon):
s = day + fd
#forecast day? if not, take beginning storage from previous time step
if day>0 and fd < 1:
storage = STOR[year*365+day-1]
elif day<1 and fd <1:
storage = 0
else:
pass
# available hydro production based on water availability
avail_power = flow_ts[year*365+day]*eff
# if it's during first refill
if s < refill_1_date:
gen =starting- ((starting-min_power)/refill_1_date)*s
storage = avail_power-gen
# if it maintains the water
elif s >= refill_1_date and s < evac_date:
gen=min_power
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif s >= evac_date and s < peak_end:
gen= min_power+ ((power_cap-min_power)/(peak_end-evac_date)* (s- evac_date))
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif s >= peak_end and s < refill_2_date:
gen= power_cap
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
elif s >=refill_2_date :
gen = power_cap-((power_cap-ending)/(365-refill_2_date)* (s-refill_2_date))
GEN[day,fd] = gen
if fd < 1:
STOR[year*365+day] = storage
else:
upper_now=upper_gen.loc[upper_gen.loc[:,'Name']== name]
upper_now=upper_now.reset_index(drop=True)
upper=upper_now.loc[0]['Max Gen']
Rule=Rule_list[year]
File_name='CA_hydropower/PGE_FNF_2/FNF_' + str(name) +'.txt'
Temp_Rule= | pd.read_csv(File_name,delimiter=' ',header=None) | pandas.read_csv |
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pandas import DataFrame
def parse_kegg_brite(brite_file):
kegg = DataFrame(columns=['group', 'family', 'level', 'target', 'generic_name',
'name', 'drug_type', 'kegg_drug_id'])
with open(brite_file) as kegg_data:
group = None
family = None
generic_name = None
level = None
target = None
i = 0
for line in kegg_data:
line = line.strip("\n")
if line.startswith("A"):
group = line[1:].strip("<b>").strip("<b/>")
if group != "Enzymes":
continue
else:
if line.startswith("B"):
family = line[1:].strip()
level = family
elif line.startswith("C"):
target = line[1:].strip()
elif line.startswith("D"):
generic_name = line[1:].strip()
elif line.startswith("E"):
line = line[1:].strip()
split = line.split()
name = " ".join(split[1:-2])
kegg.loc[i] = [group, family, level, target, generic_name, name, split[-1], split[0]]
i += 1
print("Found %i drugs acting on enzymes" % i)
return kegg
def parse_chebi_data(chebi_names_file, chebi_vertice_file, chebi_relation_file):
chebi_names = DataFrame.from_csv(chebi_names_file, sep="\t")
chebi_names.fillna("", inplace=True)
chebi_names.index.name = "id"
chebi_names.columns = map(str.lower, chebi_names.columns)
chebi_names.drop_duplicates('compound_id', keep='last', inplace=True)
chebi_names['adapted'] = chebi_names.adapted.apply(lambda v: v == "T")
chebi_analogues = chebi_names[chebi_names.name.str.contains('analog')]
chebi_antimetabolite = chebi_names[chebi_names.compound_id == 35221]
chebi_relations = DataFrame.from_csv(chebi_relation_file, sep="\t")
chebi_relations.columns = map(str.lower, chebi_relations.columns)
chebi_relations.index.name = "id"
chebi_vertices = DataFrame.from_csv(chebi_vertice_file, sep="\t")
chebi_vertices.columns = map(str.lower, chebi_vertices.columns)
chebi_vertices.index.name = "id"
def retrieve_child_id(compound_id):
return chebi_vertices.loc[compound_id, 'compound_child_id']
chebi_relations['init_compound_id'] = chebi_relations.init_id.apply(retrieve_child_id)
chebi_relations['final_compound_id'] = chebi_relations.final_id.apply(retrieve_child_id)
chebi_is_a = chebi_relations[chebi_relations['type'] == 'is_a']
chebi_has_role = chebi_relations[chebi_relations['type'] == 'has_role']
def recursive_search(roots, relations, universe, aggregated, forward=True):
aggregated = aggregated.append(roots, ignore_index=True)
if forward:
filtered = relations[relations.init_compound_id.isin(roots.compound_id)]
roots = universe[universe.compound_id.isin(filtered.final_compound_id)]
else:
filtered = relations[relations.final_compound_id.isin(roots.compound_id)]
roots = universe[universe.compound_id.isin(filtered.init_compound_id)]
if len(roots) > 0:
aggregated, roots = recursive_search(roots, relations, universe, aggregated, forward)
return aggregated, roots
data = DataFrame(columns=chebi_names.columns)
anti = DataFrame(columns=chebi_names.columns)
data, _ = recursive_search(chebi_analogues, chebi_is_a, chebi_names, data, True)
data, _ = recursive_search(chebi_antimetabolite, chebi_is_a, chebi_names, data, True)
anti, _ = recursive_search(chebi_antimetabolite, chebi_has_role, chebi_names, anti, True)
data, _ = recursive_search(anti, chebi_is_a, chebi_names, data, True)
data['compound_id'] = data.compound_id.apply(int)
return data
def parse_pubchem(summary_file):
pubchem = | DataFrame(columns=["name", "molecular_weight", "formula", "uipac_name", "create_date", "compound_id"]) | pandas.DataFrame |
import pyupbit
import time
from datetime import datetime
from pytz import timezone
import pandas as pd
import telegram # pip install python-telegram-bot
import json
from dotenv import load_dotenv # pip install python-dotenv
import os
def cal_target(ticker): # set the buy target price using the volatility breakout strategy
# time.sleep(0.1)
df_cal_target = pyupbit.get_ohlcv(ticker, "day")
yesterday = df_cal_target.iloc[-2]
today = df_cal_target.iloc[-1]
yesterday_range = yesterday['high'] - yesterday['low']
target = today['open'] + yesterday_range * 0.5
return target
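# Worked example with made-up numbers: yesterday high=110, low=90 gives range=20,
# so with today's open=100 the target is 100 + 20 * 0.5 = 110.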
def sell(ticker):
# time.sleep(0.1)
balance = upbit.get_balance(ticker)
s = upbit.sell_market_order(ticker, balance)
    msg = str(ticker)+" sell order attempt"+"\n"+json.dumps(s, ensure_ascii = False)
print(msg)
bot.sendMessage(mc,msg)
def buy(ticker, money):
# time.sleep(0.1)
b = upbit.buy_market_order(ticker, money)
try:
if b['error']:
b = upbit.buy_market_order(ticker, 100000)
            msg = "Insufficient balance, retried: " + str(ticker)+" "+str(100000)+" KRW buy attempt"+"\n"+json.dumps(b, ensure_ascii = False)
except:
        msg = str(ticker)+" "+str(money)+" KRW buy attempt"+"\n"+json.dumps(b, ensure_ascii = False)
print(msg)
bot.sendMessage(mc,msg)
def printall():
msg = f"------------------------------{now.strftime('%Y-%m-%d %H:%M:%S')}------------------------------\n"
for i in range(n):
msg += f"{'%10s'%coin_list[i]} 목표가: {'%11.1f'%target[i]} 현재가: {'%11.1f'%prices[i]} 매수금액: {'%7d'%money_list[i]} hold: {'%5s'%hold[i]} status: {op_mode[i]}\n"
print(msg)
def save_data(krw_balance): # compares results against simply holding (HODLing) the coins
    # assuming you had just held instead, set how much of each coin you would be holding
    own_coin_list_04_08 = [
        0, # BTC: number of bitcoins you would plausibly hold if just holding
0, # ETH
0 # DOGE
]
df_saved_data = | pd.read_csv('saved_data.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import pandas as pd
from frappe.utils.data import add_days, getdate, get_datetime, now_datetime
# Header mapping (ERPNext <> MVD)
hm = {
'mitglied_nr': 'mitglied_nr',
'mitglied_id': 'mitglied_id',
'status_c': 'status_c',
'sektion_id': 'sektion_id',
'zuzug_sektion': 'sektion_zq_id',
'mitgliedtyp_c': 'mitgliedtyp_c',
'mitglied_c': 'mitglied_c',
'wichtig': 'wichtig',
'eintritt': 'datum_eintritt',
'austritt': 'datum_austritt',
'wegzug': 'datum_wegzug',
'zuzug': 'datum_zuzug',
'kuendigung': 'datum_kuend_per',
'adresstyp_c': 'adresstyp_c',
'adress_id': 'adress_id',
'firma': 'firma',
'zusatz_firma': 'zusatz_firma',
'anrede_c': 'anrede_c',
'nachname_1': 'nachname_1',
'vorname_1': 'vorname_1',
'tel_p_1': 'tel_p_1',
'tel_m_1': 'tel_m_1',
'tel_g_1': 'tel_g_1',
'e_mail_1': 'e_mail_1',
'zusatz_adresse': 'zusatz_adresse',
'strasse': 'strasse',
'nummer': 'nummer',
'nummer_zu': 'nummer_zu',
'postfach': 'postfach',
'postfach_nummer': 'postfach_nummer',
'plz': 'plz',
'ort': 'ort',
'nachname_2': 'nachname_2',
'vorname_2': 'vorname_2',
'tel_p_2': 'tel_p_2',
'tel_m_2': 'tel_m_2',
'tel_g_2': 'tel_g_2',
'e_mail_2': 'e_mail_2',
'datum': 'datum',
'jahr': 'jahr',
'offen': 'offen',
'ref_nr_five_1': 'ref_nr_five_1',
'kz_1': 'kz_1',
'tkategorie_d': 'tkategorie_d',
'pers_name': 'pers_name',
'datum_von': 'datum_von',
'datum_bis': 'datum_bis',
'datum_erinnerung': 'datum_erinnerung',
'notiz_termin': 'notiz_termin',
'erledigt': 'erledigt',
'nkategorie_d': 'nkategorie_d',
'notiz': 'notiz',
'weitere_kontaktinfos': 'weitere_kontaktinfos',
'mkategorie_d': 'mkategorie_d',
'benutzer_name': 'benutzer_name',
'jahr_bez_mitgl': 'jahr_bez_mitgl',
'objekt_hausnummer': 'objekt_hausnummer',
'objekt_nummer_zu': 'objekt_nummer_zu',
'rg_nummer_zu': 'rg_nummer_zu',
'buchungen': 'buchungen',
'online_haftpflicht': 'online_haftpflicht',
'online_gutschrift': 'online_gutschrift',
'online_betrag': 'online_betrag',
'datum_online_verbucht': 'datum_online_verbucht',
'datum_online_gutschrift': 'datum_online_gutschrift',
'online_payment_method': 'online_payment_method',
'online_payment_id': 'online_payment_id'
}
def read_csv(site_name, file_name, limit=False):
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
if get_value(row, 'adresstyp_c') == 'MITGL':
create_mitgliedschaft(row)
else:
frappe.log_error("{0}".format(row), 'Adresse != MITGL, aber ID noch nicht erfasst')
else:
update_mitgliedschaft(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_mitgliedschaft(data):
try:
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitgliedtyp_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
new_mitgliedschaft = frappe.get_doc({
'doctype': 'MV Mitgliedschaft',
'mitglied_nr': str(get_value(data, 'mitglied_nr')).zfill(8),
'mitglied_id': str(get_value(data, 'mitglied_id')),
'status_c': get_status_c(get_value(data, 'status_c')),
'sektion_id': get_sektion(get_value(data, 'sektion_id')),
'mitgliedtyp_c': get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c')),
'mitglied_c': get_mitglied_c(get_value(data, 'mitglied_c')),
#'wichtig': get_value(data, 'wichtig'),
'eintritt': get_formatted_datum(get_value(data, 'eintritt')),
'austritt': get_formatted_datum(get_value(data, 'austritt')),
'wegzug': get_formatted_datum(get_value(data, 'wegzug')),
#'wegzug_zu': '', --> woher kommt diese Info?
'zuzug': zuzug,
'zuzug_von': zuzug_von,
'kuendigung': get_formatted_datum(get_value(data, 'kuendigung')),
'kundentyp': kundentyp,
'firma': get_value(data, 'firma'),
'zusatz_firma': get_value(data, 'zusatz_firma'),
'anrede_c': get_anrede_c(get_value(data, 'anrede_c')),
'nachname_1': get_value(data, 'nachname_1'),
'vorname_1': get_value(data, 'vorname_1'),
'tel_p_1': str(get_value(data, 'tel_p_1')),
'tel_m_1': str(get_value(data, 'tel_m_1')),
'tel_g_1': str(get_value(data, 'tel_g_1')),
'e_mail_1': get_value(data, 'e_mail_1'),
'zusatz_adresse': get_value(data, 'zusatz_adresse'),
'strasse': strasse,
'objekt_strasse': strasse, # fallback
'objekt_ort': get_value(data, 'ort'), # fallback
'nummer': get_value(data, 'nummer'),
'nummer_zu': get_value(data, 'nummer_zu'),
'postfach': postfach,
'postfach_nummer': get_value(data, 'postfach_nummer'),
'plz': get_value(data, 'plz'),
'ort': get_value(data, 'ort'),
'hat_solidarmitglied': hat_solidarmitglied,
'nachname_2': get_value(data, 'nachname_2'),
'vorname_2': get_value(data, 'vorname_2'),
'tel_p_2': str(get_value(data, 'tel_p_2')),
#'tel_m_2': str(get_value(data, 'tel_m_2')),
'tel_g_2': str(get_value(data, 'tel_g_2')),
'e_mail_2': str(get_value(data, 'e_mail_2'))
})
new_mitgliedschaft.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n---\n{1}".format(err, data), 'create_mitgliedschaft')
return
def update_mitgliedschaft(data):
try:
mitgliedschaft = frappe.get_doc("MV Mitgliedschaft", str(get_value(data, 'mitglied_id')))
if get_value(data, 'adresstyp_c') == 'MITGL':
# Mitglied (inkl. Soli)
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitglied_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
mitgliedschaft.mitglied_nr = str(get_value(data, 'mitglied_nr')).zfill(8)
mitgliedschaft.status_c = get_status_c(get_value(data, 'status_c'))
mitgliedschaft.sektion_id = get_sektion(get_value(data, 'sektion_id'))
mitgliedschaft.mitgliedtyp_c = get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c'))
mitgliedschaft.mitglied_c = get_mitglied_c(get_value(data, 'mitglied_c'))
#mitgliedschaft.wichtig = get_value(data, 'wichtig')
mitgliedschaft.eintritt = get_formatted_datum(get_value(data, 'eintritt'))
mitgliedschaft.austritt = get_formatted_datum(get_value(data, 'austritt'))
mitgliedschaft.wegzug = get_formatted_datum(get_value(data, 'wegzug'))
mitgliedschaft.zuzug = zuzug
#mitgliedschaft.wegzug_zu = '' --> woher kommt diese Info?
mitgliedschaft.zuzug_von = zuzug_von
mitgliedschaft.kuendigung = get_formatted_datum(get_value(data, 'kuendigung'))
mitgliedschaft.kundentyp = kundentyp
mitgliedschaft.firma = get_value(data, 'firma')
mitgliedschaft.zusatz_firma = get_value(data, 'zusatz_firma')
mitgliedschaft.anrede_c = get_anrede_c(get_value(data, 'anrede_c'))
mitgliedschaft.nachname_1 = get_value(data, 'nachname_1')
mitgliedschaft.vorname_1 = get_value(data, 'vorname_1')
mitgliedschaft.tel_p_1 = str(get_value(data, 'tel_p_1'))
mitgliedschaft.tel_m_1 = str(get_value(data, 'tel_m_1'))
mitgliedschaft.tel_g_1 = str(get_value(data, 'tel_g_1'))
mitgliedschaft.e_mail_1 = get_value(data, 'e_mail_1')
mitgliedschaft.zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.strasse = strasse
mitgliedschaft.nummer = get_value(data, 'nummer')
mitgliedschaft.nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.postfach = postfach
mitgliedschaft.postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.plz = get_value(data, 'plz')
mitgliedschaft.ort = get_value(data, 'ort')
mitgliedschaft.hat_solidarmitglied = hat_solidarmitglied
mitgliedschaft.nachname_2 = get_value(data, 'nachname_2')
mitgliedschaft.vorname_2 = get_value(data, 'vorname_2')
mitgliedschaft.tel_p_2 = str(get_value(data, 'tel_p_2'))
#mitgliedschaft.tel_m_2 = str(get_value(data, 'tel_m_2'))
mitgliedschaft.tel_g_2 = str(get_value(data, 'tel_g_2'))
mitgliedschaft.e_mail_2 = get_value(data, 'e_mail_2')
mitgliedschaft.adress_id_mitglied = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'OBJEKT':
# Objekt Adresse
mitgliedschaft.objekt_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.objekt_strasse = get_value(data, 'strasse') or 'Fehlende Angaben!'
mitgliedschaft.objekt_hausnummer = get_value(data, 'nummer')
mitgliedschaft.objekt_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.objekt_plz = get_value(data, 'plz')
mitgliedschaft.objekt_ort = get_value(data, 'ort') or 'Fehlende Angaben!'
mitgliedschaft.adress_id_objekt = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'RECHN':
# Rechnungs Adresse
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
mitgliedschaft.abweichende_rechnungsadresse = 1
mitgliedschaft.rg_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.rg_strasse = strasse
mitgliedschaft.rg_nummer = get_value(data, 'nummer')
mitgliedschaft.rg_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.rg_postfach = postfach
mitgliedschaft.rg_postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.rg_plz = get_value(data, 'plz')
mitgliedschaft.rg_ort = get_value(data, 'ort')
mitgliedschaft.adress_id_rg = get_value(data, 'adress_id')
# else:
# TBD!
mitgliedschaft.save(ignore_permissions=True)
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n{1}".format(err, data), 'update_mitgliedschaft')
return
def get_sektion(id):
    # Listing is not exhaustive; needs checking!
if id == 25:
return 'MVD'
elif id == 4:
return 'Bern'
elif id == 8:
return 'Basel Stadt'
elif id == 14:
return 'Luzern'
elif id == 3:
return 'Aargau'
else:
return 'Sektions-ID unbekannt'
def get_status_c(status_c):
    # Listing is probably not exhaustive; needs checking!
if status_c == 'AREG':
return 'Mitglied'
elif status_c == 'MUTATI':
return 'Mutation'
elif status_c == 'AUSSCH':
return 'Ausschluss'
elif status_c == 'GESTOR':
return 'Gestorben'
elif status_c == 'KUNDIG':
return 'Kündigung'
elif status_c == 'WEGZUG':
return 'Wegzug'
elif status_c == 'ZUZUG':
return 'Zuzug'
else:
return 'Mitglied'
def get_mitgliedtyp_c(mitgliedtyp_c):
# TBD!!!!!!!!!!
if mitgliedtyp_c == 'PRIV':
return 'Privat'
else:
return 'Privat'
def get_mitglied_c(mitglied_c):
# TBD!!!!!!!!!!
if mitglied_c == 'MITGL':
return 'Mitglied'
else:
return 'Mitglied'
def get_anrede_c(anrede_c):
anrede_c = int(anrede_c)
if anrede_c == 1:
return 'Herr'
elif anrede_c == 2:
return 'Frau'
elif anrede_c == 3:
return 'Frau und Herr'
elif anrede_c == 4:
return 'Herr und Frau'
elif anrede_c == 5:
return 'Familie'
elif anrede_c == 7:
return 'Herren'
elif anrede_c == 8:
return 'Frauen'
else:
return ''
def get_formatted_datum(datum):
if datum:
datum_raw = datum.split(" ")[0]
if not datum_raw:
return ''
else:
return datum_raw.replace("/", "-")
else:
return ''
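# Example (assuming the Miveba export format "YYYY/MM/DD hh:mm:ss"): "2021/03/15 00:00:00" -> "2021-03-15"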
def check_postfach(row, value):
value = row[hm[value]]
if not pd.isnull(value):
postfach = int(value)
if postfach < 0:
return 1
else:
return 0
else:
return 0
def get_value(row, value):
value = row[hm[value]]
if not pd.isnull(value):
try:
if isinstance(value, str):
return value.strip()
else:
return value
except:
return value
else:
return ''
def migliedschaft_existiert(mitglied_id):
anz = frappe.db.sql("""SELECT COUNT(`name`) AS `qty` FROM `tabMitgliedschaft` WHERE `mitglied_id` = '{mitglied_id}'""".format(mitglied_id=mitglied_id), as_dict=True)[0].qty
if anz > 0:
return True
else:
return False
# --------------------------------------------------------------
# Debitor Importer
# --------------------------------------------------------------
def import_debitoren(site_name, file_name, limit=False, delete_from=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_debitoren --kwargs "{'site_name': 'site1.local', 'file_name': 'offene_rechnungen.csv'}"
'''
if delete_from:
SQL_SAFE_UPDATES_false = frappe.db.sql("""SET SQL_SAFE_UPDATES=0""", as_list=True)
delete_sinvs = frappe.db.sql("""DELETE FROM `tabSales Invoice` WHERE `sektion_id` = '{delete_from}'
AND `docstatus` = 1
AND `status` = 'Overdue'""".format(delete_from=delete_from), as_list=True)
SQL_SAFE_UPDATES_true = frappe.db.sql("""SET SQL_SAFE_UPDATES=1""", as_list=True)
frappe.db.commit()
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if get_value(row, 'offen') > 0:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
frappe.log_error("{0}".format(row), 'Mitglied existiert nicht')
else:
erstelle_rechnung(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def erstelle_rechnung(row):
try:
file_qrr = int(str(get_value(row, 'ref_nr_five_1')).replace(" ", ""))
qrr = '{num:027d}'.format(num=file_qrr)
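        # zero-pads the reference to the 27-digit ESR format, e.g. 123456 becomes 21 zeros followed by 123456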
existing_sinv_query = ("""SELECT `name` FROM `tabSales Invoice` WHERE REPLACE(`esr_reference`, ' ', '') = '{qrr}'""".format(qrr=qrr))
if len(frappe.db.sql(existing_sinv_query, as_list=True)) > 0:
frappe.log_error("{0}".format(row), 'Rechnung wurde bereits erstellt')
return
else:
existing_sinv_query = ("""SELECT `name` FROM `tabSales Invoice` WHERE `mv_mitgliedschaft` = '{mitglied_id}'""".format(mitglied_id=str(get_value(row, 'mitglied_id'))))
existing_sinv = frappe.db.sql(existing_sinv_query, as_dict=True)
if len(existing_sinv) > 0:
frappe.db.sql("""UPDATE `tabSales Invoice` SET `esr_reference` = '{qrr}' WHERE `name` = '{name}'""".format(qrr=qrr, name=existing_sinv[0].name), as_list=True)
frappe.log_error("{0}".format(row), 'Update QRR')
return
else:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
posting_date = str(get_value(row, 'datum')).split(" ")[0]
item = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "mitgliedschafts_artikel")
company = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "company")
cost_center = frappe.get_value("Company", company, "cost_center")
sektions_code = str(frappe.get_value("Sektion", mitgliedschaft.sektion_id, "sektion_id"))
sinv = frappe.get_doc({
"doctype": "Sales Invoice",
"company": company,
"customer": mitgliedschaft.rg_kunde or mitgliedschaft.kunde_mitglied,
"set_posting_time": 1,
"posting_date": posting_date,
"posting_time": str(get_value(row, 'datum')).split(" ")[1],
"ist_mitgliedschaftsrechnung": 1,
"mv_mitgliedschaft": mitgliedschaft.name,
"sektion_id": mitgliedschaft.sektion_id,
"sektions_code": sektions_code,
"mitgliedschafts_jahr": str(get_value(row, 'jahr')),
"due_date": add_days(posting_date, 30),
"esr_reference": qrr,
"items": [
{
"item_code": item,
"qty": 1,
"rate": get_value(row, 'offen'),
"cost_center": cost_center
}
]
})
sinv.insert()
sinv.submit()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Rechnung konnte nicht erstellt werden')
return
# --------------------------------------------------------------
# Miveba-Termin Importer
# --------------------------------------------------------------
def import_termine(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_termine --kwargs "{'site_name': 'site1.local', 'file_name': 'termine.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
create_termin(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_termin(row):
try:
kategorie = check_kategorie(row)
kontakt = check_kontakt(row)
termin_status = check_termin_status(row, 'erledigt')
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
new = frappe.get_doc({
"doctype": "Termin",
"kategorie": kategorie,
"kontakt": kontakt,
"sektion_id": sektion_id,
"von": str(get_value(row, 'datum_von')),
"bis": str(get_value(row, 'datum_bis')),
"erinnerung": str(get_value(row, 'datum_erinnerung')),
"notitz": str(get_value(row, 'notiz_termin')),
"status": termin_status,
"mv_mitgliedschaft": str(get_value(row, 'mitglied_id'))
})
new.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
def check_kategorie(row):
kategorie = str(get_value(row, 'tkategorie_d'))
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
query = ("""SELECT `name` FROM `tabTerminkategorie` WHERE `kategorie` = '{kategorie}' AND `sektion_id` = '{sektion_id}'""".format(kategorie=kategorie, sektion_id=sektion_id))
kat = frappe.db.sql(query, as_list=True)
if len(kat) > 0:
return kat[0][0]
else:
new = frappe.get_doc({
"doctype": "Terminkategorie",
"kategorie": kategorie,
"sektion_id": sektion_id
})
new.insert()
frappe.db.commit()
return new.name
def check_kontakt(row):
kontakt = str(get_value(row, 'pers_name'))
if kontakt and kontakt != '':
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
query = ("""SELECT `name` FROM `tabTermin Kontaktperson` WHERE `kontakt` = '{kontakt}' AND `sektion_id` = '{sektion_id}'""".format(kontakt=kontakt, sektion_id=sektion_id))
kat = frappe.db.sql(query, as_list=True)
if len(kat) > 0:
return kat[0][0]
else:
new = frappe.get_doc({
"doctype": "Termin Kontaktperson",
"kontakt": kontakt,
"sektion_id": sektion_id
})
new.insert()
frappe.db.commit()
return new.name
else:
return ''
def check_termin_status(row, value):
value = row[hm[value]]
if not pd.isnull(value):
termin_status = int(value)
if termin_status < 0:
return 'Closed'
else:
return 'Open'
else:
return 'Open'
# --------------------------------------------------------------
# Miveba-Notizen Importer
# --------------------------------------------------------------
def import_notizen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_notizen --kwargs "{'site_name': 'site1.local', 'file_name': 'notizen.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
create_notiz(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Notiz konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_notiz(row):
try:
datum_erinnerung = str(get_value(row, 'datum_erinnerung'))
if get_datetime(datum_erinnerung) > now_datetime():
create_todo(row)
else:
create_comment(row)
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
def create_comment(row):
try:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
description = str(get_value(row, 'nkategorie_d')) + "<br>"
description += str(get_value(row, 'datum_von')) + "<br>"
description += str(get_value(row, 'notiz')) + "<br>"
description += str(get_value(row, 'benutzer_name')) + "<br>"
mitgliedschaft.add_comment('Comment', text=description)
frappe.db.commit()
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Kommentar konnte nicht erstellt werden')
def create_todo(row):
try:
description = str(get_value(row, 'nkategorie_d')) + "<br>"
description += str(get_value(row, 'datum_von')) + "<br>"
description += str(get_value(row, 'notiz')) + "<br>"
description += str(get_value(row, 'benutzer_name')) + "<br>"
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
owner = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "virtueller_user")
todo = frappe.get_doc({
"doctype":"ToDo",
"owner": owner,
"reference_type": "Mitgliedschaft",
"reference_name": str(get_value(row, 'mitglied_id')),
"description": description or '',
"priority": "Medium",
"status": "Open",
"date": str(get_value(row, 'datum_erinnerung')),
"assigned_by": owner,
"mv_mitgliedschaft": str(get_value(row, 'mitglied_id'))
}).insert(ignore_permissions=True)
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'ToDo konnte nicht erstellt werden')
# --------------------------------------------------------------
# Weitere Kontaktinfos Importer
# --------------------------------------------------------------
def import_weitere_kontaktinfos(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_weitere_kontaktinfos --kwargs "{'site_name': 'site1.local', 'file_name': 'weitere_kontaktinfos.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
erstelle_weitere_kontaktinformation(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Weitere Kontaktinformation konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def erstelle_weitere_kontaktinformation(row):
try:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
description = str(get_value(row, 'weitere_kontaktinfos')).replace("\n", "<br>")
mitgliedschaft.add_comment('Comment', text=description)
frappe.db.commit()
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Kommentar konnte nicht erstellt werden')
# --------------------------------------------------------------
# Miveba Buchungen Importer
# --------------------------------------------------------------
def import_miveba_buchungen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_miveba_buchungen --kwargs "{'site_name': 'site1.local', 'file_name': 'miveba_buchungen.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
commit_count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
mitglied_id = str(get_value(row, 'mitglied_id'))
miveba_buchungen = str(get_value(row, 'buchungen'))
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `miveba_buchungen` = '{miveba_buchungen}' WHERE `name` = '{mitglied_id}'""".format(miveba_buchungen=miveba_buchungen, mitglied_id=mitglied_id), as_list=True)
if commit_count == 1000:
frappe.db.commit()
commit_count = 1
else:
commit_count += 1
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Miveba Buchung konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
# --------------------------------------------------------------
# Tags Importer
# --------------------------------------------------------------
def import_tags(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_tags --kwargs "{'site_name': 'site1.local', 'file_name': 'kategorien.csv'}"
'''
from frappe.desk.tags import add_tag
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
add_tag(str(get_value(row, 'mkategorie_d')), "Mitgliedschaft", str(get_value(row, 'mitglied_id')))
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Tag konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
# --------------------------------------------------------------
# Special Importer
# --------------------------------------------------------------
def import_special(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_special --kwargs "{'site_name': 'site1.local', 'file_name': 'jahr_bez_mitgl-PROD-1.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
commit_count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
mitglied_id = str(get_value(row, 'mitglied_id'))
jahr = str(get_value(row, 'jahr_bez_mitgl'))
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `zahlung_mitgliedschaft` = '{jahr}' WHERE `name` = '{mitglied_id}'""".format(jahr=jahr, mitglied_id=mitglied_id), as_list=True)
frappe.db.commit()
if int(jahr) == 2022:
sinvs = frappe.db.sql("""SELECT `name` FROM `tabSales Invoice` WHERE `mv_mitgliedschaft` = '{mitglied_id}' AND `status` != 'Paid' AND `docstatus` = 1""".format(mitglied_id=mitglied_id), as_dict=True)
for sinv in sinvs:
try:
sinv = frappe.get_doc("Sales Invoice", sinv.name)
sinv.cancel()
sinv.delete()
frappe.db.commit()
except Exception as e:
frappe.log_error("{0}\n\n{1}\n\n{2}".format(e, sinv.name, row), 'RG konnte nicht gelöscht werden')
commit_count += 1
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Special konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
# --------------------------------------------------------------
# Adressen Update
# --------------------------------------------------------------
def update_adressen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.update_adressen --kwargs "{'site_name': 'site1.local', 'file_name': 'hausnummer_zusatz_gefiltert.csv'}"
'''
from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
| pd.set_option('display.max_rows', None, 'display.max_columns', None) | pandas.set_option |
# @author <NAME>
# This code is licensed under the MIT license (see LICENSE.txt for details).
"""
Custom data structures for paperfetcher.
"""
import contextlib
import csv
import logging
import pandas as pd
import rispy
from rispy.config import LIST_TYPE_TAGS, TAG_KEY_MAPPING
from paperfetcher.exceptions import DatasetError
# Logging
logger = logging.getLogger(__name__)
class Dataset:
"""
Abstract interface that defines functions for child Dataset classes to implement.
Datasets are designed to store [usually tabular] data (as input to or output from paperfetcher searches), export
data to pandas DataFrames, and load/save data to disk using common data formats (txt, csv, xlsx).
Args:
items (iterable): Items to store in dataset (default=[]).
"""
def __init__(self, items: list = []):
self._items = list(items)
@classmethod
def from_txt(cls, file):
"""Loads dataset from .txt file."""
# Child class must implement this.
raise NotImplementedError()
@classmethod
def from_csv(cls, file):
"""Loads dataset from .csv file."""
# Child class must implement this.
raise NotImplementedError()
@classmethod
def from_excel(cls, file):
"""Loads dataset from Excel file file."""
# Child class must implement this.
raise NotImplementedError()
# Properties
def __len__(self):
return len(self._items)
def __repr__(self):
return self.__class__.__name__ + " with {} items: ".format(len(self)) + repr(self._items)
def append(self, item):
"""Adds an item to the dataset."""
self._items.append(item)
def extend(self, items: list):
"""Adds each item from a list of items to the dataset."""
self._items.extend(items)
# Export to pandas
def to_df(self):
"""Converts dataset to DataFrame."""
# Child class must implement this.
raise NotImplementedError()
# Export to disk
def save_txt(self, file):
"""Saves dataset to .txt file."""
# Child class must implement this.
raise NotImplementedError()
def save_csv(self, file):
"""Saves dataset to .csv file."""
# Child class must implement this.
raise NotImplementedError()
def save_excel(self, file):
"""Saves dataset to Excel file."""
# Child class must implement this.
raise NotImplementedError()
class DOIDataset(Dataset):
"""
Stores a dataset of DOIs.
DOIDatasets can be exported to pandas DataFrames, and loaded from or saved to disk
in text, CSV, or Excel file formats.
Args:
items (list): List of DOIs (str) to store (default=[]).
Examples:
To create a DOIDataset object from a list of DOIs:
>>> ds = DOIDataset(["x1.y1.z1/123123", "x2.y2.z2/456456"])
To add a DOI to the DOIDataset object:
>>> ds.append("x3.y3.z3/789789")
To export the DOIDataset object to a pandas DataFrame:
>>> df = ds.to_df()
>>> df
DOI
0 x1.y1.z1/123123
1 x2.y2.z2/456456
2 x3.y3.z3/789789
To save data to disk:
>>> ds.save_txt("dois.txt")
>>> ds.save_csv("dois.csv")
>>> ds.save_excel("dois.xlsx")
"""
def __init__(self, items: list = []):
super().__init__(items)
def extend_dataset(self, ds: 'DOIDataset'):
"""Appends all items from DOIDataset ds to the end of the current dataset."""
self.extend(ds._items)
def to_df(self):
"""Converts dataset to DataFrame."""
return pd.DataFrame(self._items, columns=['DOI'])
def to_txt_string(self):
"""Returns a string which can be written to .txt file"""
txt = ""
for doi in self._items:
txt = txt + doi + "\n"
return txt
def save_txt(self, file):
"""Saves dataset to .txt file."""
if hasattr(file, 'write'):
file_ctx = contextlib.nullcontext(file)
else:
if not file.endswith('.txt'):
file = file + '.txt'
file_ctx = open(file, "w")
with file_ctx as f:
f.write(self.to_txt_string())
def save_csv(self, file):
"""Saves dataset to .csv file."""
if hasattr(file, 'write'):
file_ctx = contextlib.nullcontext(file)
else:
if not file.endswith('.csv'):
file = file + '.csv'
file_ctx = open(file, "w")
with file_ctx as f:
write = csv.writer(f)
write.writerow(["DOI"])
write.writerows([[item] for item in self._items])
def save_excel(self, file):
"""Saves dataset to Excel file."""
if not file.endswith('.xlsx'):
file = file + '.xlsx'
df = | pd.DataFrame(self._items, columns=['DOI']) | pandas.DataFrame |
__docformat__ = "numpy"
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import get_flair
from gamestonk_terminal.menu import session
from gamestonk_terminal.cryptocurrency import coin_api
class CryptoController:
CHOICES = ["help", "q", "quit", "load", "view"]
def __init__(self):
""" CONSTRUCTOR """
self.crypto_parser = argparse.ArgumentParser(add_help=False, prog="crypto")
self.crypto_parser.add_argument("cmd", choices=self.CHOICES)
self.current_coin = None
self.current_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import json
import os
import pandas as pd
from pandas import Series
try:
import requests
except ImportError:
requests = None
from . import find_pmag_dir
from . import data_model3 as data_model
from pmag_env import set_env
pmag_dir = find_pmag_dir.get_pmag_dir()
data_model_dir = os.path.join(pmag_dir, 'pmagpy', 'data_model')
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(data_model_dir):
data_model_dir = os.path.join(pmag_dir, 'data_model')
VOCAB = {}
class Vocabulary(object):
def __init__(self, dmodel=None):
global VOCAB
self.vocabularies = []
self.possible_vocabularies = []
self.all_codes = []
self.code_types = []
self.methods = []
self.age_methods = []
if len(VOCAB):
self.set_vocabularies()
else:
if isinstance(dmodel, data_model.DataModel):
self.data_model = dmodel
Vocabulary.dmodel = dmodel
else:
try:
self.data_model = Vocabulary.dmodel
except AttributeError:
Vocabulary.dmodel = data_model.DataModel()
self.data_model = Vocabulary.dmodel
self.get_all_vocabulary()
VOCAB['vocabularies'] = self.vocabularies
VOCAB['possible_vocabularies'] = self.possible_vocabularies
VOCAB['all_codes'] = self.all_codes
VOCAB['code_types'] = self.code_types
VOCAB['methods'] = self.methods
VOCAB['age_methods'] = self.age_methods
VOCAB['suggested'] = self.suggested
def set_vocabularies(self):
self.vocabularies = VOCAB['vocabularies']
self.possible_vocabularies = VOCAB['possible_vocabularies']
self.all_codes = VOCAB['all_codes']
self.code_types = VOCAB['code_types']
self.methods = VOCAB['methods']
self.age_methods = VOCAB['age_methods']
self.suggested = VOCAB['suggested']
## Get method codes
def get_json_online(self, url):
"""
Use the requests module to get JSON from EarthRef.
If this fails or times out, return an empty list (or False if requests is unavailable).
Returns
---------
result : requests.models.Response, or [] if unsuccessful
"""
if not requests:
return False
try:
req = requests.get(url, timeout=3)
if not req.ok:
return []
return req
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
return []
def get_meth_codes(self):
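# Load the MagIC method-code vocabulary: prefer the module-level VOCAB cache, then earthref.org, then the bundled JSON file.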
if len(VOCAB):
self.set_vocabularies()
return
raw_codes = []
# try to get meth codes online
if not set_env.OFFLINE:
try:
raw = self.get_json_online('https://www2.earthref.org/MagIC/method-codes.json')
raw_codes = pd.DataFrame(raw.json())
print('-I- Getting method codes from earthref.org')
except Exception as ex:
#print(ex, type(ex))
print("-I- Couldn't connect to earthref.org, using cached method codes")
# if you couldn't get them online, use the cache
if not len(raw_codes):
print("-I- Using cached method codes")
raw_codes = pd.io.json.read_json(os.path.join(data_model_dir, "method_codes.json"), encoding='utf-8-sig')
# parse codes
code_types = raw_codes.loc['label']
all_codes = []
for code_name in code_types.index:
if code_name == 'geoid':
continue
df = pd.DataFrame(raw_codes[code_name]['codes'])
# remake the dataframe with the code (i.e., 'SM_VAR') as the index
df.index = df['code']
del df['code']
# add a column with the code type (i.e., 'anisotropy_estimation')
df['dtype'] = code_name
little_series = df['definition']
big_series = Series()
if any(all_codes):
try: # retains pandas backwards compatibility
all_codes = pd.concat([all_codes, df], sort=True)
big_series = pd.concat([big_series, little_series], sort=True)
except TypeError:
all_codes = | pd.concat([all_codes, df]) | pandas.concat |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
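# Build a DataFrame with one column per basic dtype (ints, floats, bool, datetime, strings) for round-trip tests.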
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Pandas only supports ns resolution; Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
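# Round-trip df through a pyarrow Table (or RecordBatch) and assert the converted result equals `expected` (df itself by default), optionally checking the schema.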
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
# all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas-generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
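# Convert a pandas Series to a pyarrow Array and back, checking the resulting Arrow type and values (re-localizing tz-aware timestamps before comparing).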
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
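# Convert values (optionally with an explicit mask) to a pyarrow array, verify the null count, and compare the pandas round-trip against the inputs.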
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
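# Convert a numpy array to pyarrow and back, asserting element-wise equality.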
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
# we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
# TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
target_size = 3 * 1024**3  # 3 GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
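# Each row carries ~64 KiB of binary payload, so ~3 GB in total pushes the binary builder
# past its 2 GB per-chunk offset limit — hence the num_chunks == 2 assertion below.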
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal( | pd.Series(result) | pandas.Series |
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
def test_constructor(frame_or_series):
c = frame_or_series(range(5)).ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError, match=msg):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError, match=msg):
c(alpha=0.5, span=1.5)
# not valid: com < 0
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
c(com=-0.5)
# not valid: span < 1
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
c(span=0.5)
# not valid: halflife <= 0
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
msg = "alpha must satisfy: 0 < alpha <= 1"
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError, match=msg):
c(alpha=alpha)
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
# see gh-12811
e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
def test_ewma_times_not_datetime_type():
msg = r"times must be datetime64\[ns\] dtype."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(5))
def test_ewma_times_not_same_length():
msg = "times must be the same length as the object."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]"))
def test_ewma_halflife_not_correct_type():
msg = "halflife must be a timedelta convertible object"
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
def test_ewma_halflife_without_times(halflife_with_times):
msg = "halflife can only be a timedelta convertible argument if times is not None."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=halflife_with_times)
@pytest.mark.parametrize(
"times",
[
np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),
date_range("2000", freq="D", periods=10),
date_range("2000", freq="D", periods=10).tz_localize("UTC"),
],
)
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
with tm.assert_produces_warning(FutureWarning, match="nuisance columns"):
# GH#42738
result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()
expected = df.ewm(halflife=1.0, min_periods=min_periods).mean()
tm.assert_frame_equal(result, expected)
def test_ewma_with_times_variable_spacing(tz_aware_fixture):
tz = tz_aware_fixture
halflife = "23 days"
times = DatetimeIndex(
["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"]
).tz_localize(tz)
data = np.arange(3)
df = DataFrame(data)
result = df.ewm(halflife=halflife, times=times).mean()
expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459])
tm.assert_frame_equal(result, expected)
def test_ewm_with_nat_raises(halflife_with_times):
# GH#38535
ser = Series(range(1))
times = DatetimeIndex(["NaT"])
with pytest.raises(ValueError, match="Cannot convert NaT values to integer"):
ser.ewm(com=0.1, halflife=halflife_with_times, times=times)
def test_ewm_with_times_getitem(halflife_with_times):
# GH 40164
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
times = date_range("2000", freq="D", periods=10)
df = DataFrame({"A": data, "B": data})
result = df.ewm(halflife=halflife, times=times)["A"].mean()
expected = df.ewm(halflife=1.0)["A"].mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"])
def test_ewm_getitem_attributes_retained(arg, adjust, ignore_na):
# GH 40164
kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na}
ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs)
expected = {attr: getattr(ewm, attr) for attr in ewm._attributes}
ewm_slice = ewm["A"]
result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes}
assert result == expected
def test_ewm_vol_deprecated():
ser = Series(range(1))
with tm.assert_produces_warning(FutureWarning):
result = ser.ewm(com=0.1).vol()
expected = ser.ewm(com=0.1).std()
tm.assert_series_equal(result, expected)
def test_ewma_times_adjust_false_raises():
# GH 40098
with pytest.raises(
NotImplementedError, match="times is not supported with adjust=False."
):
Series(range(1)).ewm(
0.1, adjust=False, times=date_range("2000", freq="D", periods=1)
)
@pytest.mark.parametrize(
"func, expected",
[
[
"mean",
DataFrame(
{
0: range(5),
1: range(4, 9),
2: [7.428571, 9, 10.571429, 12.142857, 13.714286],
},
dtype=float,
),
],
[
"std",
DataFrame(
{
0: [np.nan] * 5,
1: [4.242641] * 5,
2: [4.6291, 5.196152, 5.781745, 6.380775, 6.989788],
}
),
],
[
"var",
DataFrame(
{
0: [np.nan] * 5,
1: [18.0] * 5,
2: [21.428571, 27, 33.428571, 40.714286, 48.857143],
}
),
],
],
)
def test_float_dtype_ewma(func, expected, float_numpy_dtype):
# GH#42452
df = DataFrame(
{0: range(5), 1: range(6, 11), 2: range(10, 20, 2)}, dtype=float_numpy_dtype
)
e = df.ewm(alpha=0.5, axis=1)
result = getattr(e, func)()
tm.assert_frame_equal(result, expected)
def test_times_string_col_deprecated():
# GH 43265
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
with tm.assert_produces_warning(FutureWarning, match="Specifying times"):
result = df.ewm(halflife="1 day", min_periods=0, times="time_col").mean()
expected = df.ewm(halflife=1.0, min_periods=0).mean()
tm.assert_frame_equal(result, expected)
def test_ewm_sum_adjust_false_notimplemented():
data = Series(range(1)).ewm(com=1, adjust=False)
with pytest.raises(NotImplementedError, match="sum is not"):
data.sum()
@pytest.mark.parametrize(
"expected_data, ignore",
[[[10.0, 5.0, 2.5, 11.25], False], [[10.0, 5.0, 5.0, 12.5], True]],
)
def test_ewm_sum(expected_data, ignore):
# xref from Numbagg tests
# https://github.com/numbagg/numbagg/blob/v0.2.1/numbagg/test/test_moving.py#L50
data = Series([10, 0, np.nan, 10])
result = data.ewm(alpha=0.5, ignore_na=ignore).sum()
expected = Series(expected_data)
tm.assert_series_equal(result, expected)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
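# adjust=True divides by the running sum of (1 - alpha)**i weights;
# adjust=False uses the recursion y_t = (1 - alpha) * y_{t-1} + alpha * x_t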
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, | Series([np.nan] * 2 + [1.0] * 4) | pandas.Series |
from building_data_requests import get_value, get_bulk
from main import HeatmapMain
import pandas as pd
import numbers
import requests
from tempfile import mkstemp
from shutil import move
from os import fdopen, remove
current_air_data = None
rooms_and_sensors = None
def init_data_tools(rooms_and_sensors_path):
global rooms_and_sensors
# Read spreadsheet into a DataFrame.
# Each row contains the following:
# - Label
# - Facility
# - Instance ID of CO2 sensor
# - Instance ID of temperature sensor
rooms_and_sensors = pd.read_csv(rooms_and_sensors_path, na_filter=False, comment='#')
update_air_data()
def update_air_data():
global current_air_data
current_air_data = get_bulk_request_df()
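# Missing readings come back as empty strings; errors='coerce' turns them into NaN so numeric filtering works downstream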
current_air_data['temperature'] = pd.to_numeric(current_air_data['temperature'], errors='coerce')
current_air_data['co2'] = pd.to_numeric(current_air_data['co2'], errors='coerce')
def get_request_df(room):
row = rooms_and_sensors[rooms_and_sensors['Label'] == str(room)]
if not row.empty:
try:
temp_value, temp_units = get_value(row['Facility'].iloc[0], row['Temperature'].iloc[0], True)
co2_value, co2_units = get_value(row['Facility'].iloc[0], row['CO2'].iloc[0], True)
except requests.exceptions.ConnectionError:
raise ConnectionError("Unable to get data. Are you connected to the right WiFi network?")
# Prepare to print
temp_value = int(temp_value) if temp_value else ''
temp_units = temp_units if temp_units else ''
co2_value = int(co2_value) if co2_value else ''
co2_units = co2_units if co2_units else ''
df_dictionary = {
'room': [room if temp_value and temp_units else ''],
# If there's no data, leave the room entry empty so the failsafe below catches it
'temperature': [temp_value],
'temperature units': [temp_units],
'co2': [co2_value],
'co2 units': [co2_units]
}
if not df_dictionary['room'][0]:
return None
return | pd.DataFrame.from_dict(df_dictionary) | pandas.DataFrame.from_dict |
import pytest
import pandas as pd
import numpy as np
from shapely import wkt
from tenzing.core.model_implementations import *
_test_suite = [
pd.Series([1, 2, 3], name='int_series'),
pd.Series([1, 2, 3], name='categorical_int_series', dtype='category'),
pd.Series([1, 2, np.nan], name='int_nan_series'),
pd.Series([1.0, 2.1, 3.0], name='float_series'),
pd.Series([1.0, 2.5, np.nan], name='float_nan_series'),
| pd.Series([1.0, 2.0, 3.1], dtype='category', name='categorical_float_series') | pandas.Series |
'''
<NAME> (05-05-20)
Quick tutorial on importing data into pandas. See associated readme file for more information.
'''
# import statements:
import pandas as pd # the "as pd" component just allows you to reference pandas functions with the shortcut "pd."
import os # used to specify filepaths
'''
This first part, setting the filepath variable, is just using os.path.join to specify where our file is located, which
in this case is in a subdirectory called 'data' (combined.csv is the name of our data file). This function just takes the
subcomponents you specify (in this case 'data' and 'combined.csv') and joins them into a relative filepath. This is
helpful because it remains consistent across different OSs that specify filepaths in different ways.
'''
data_filepath = os.path.join('data', 'combined.csv')
'''
Next, we use the pd.read_csv function to load the data. In this case, our data is a csv of confirmed cases and deaths by
county and metro area, by date. Take a look at the csv file to get a sense of how it's structured; it's a pretty clean
and easy-to-load file (this is generated by the great script Aaron wrote to automatically download our case data).
For each county, there is one observation per day from late January to mid-April.
The first argument to the read_csv() function is the file, which we've specified the location of above.
dtype is another optional argument, which we're passing as a dictionary of two column IDs that we want to read in as
strings (charvars). They are numeric ID variables, and if we let pandas infer their types, it might decide
they're numbers and strip the leading zeroes, which would make them hard to match on.
There are many other optional arguments, but because this is a very cleanly-formatted CSV, that's all we need now. It's
often necessary to specify the index column or the row containing the column names, but because column names are
located in row 0 of this spreadsheet, pandas is able to infer that.
'''
covid = | pd.read_csv(data_filepath, dtype={"CBSA Code": str, 'countyFIPS': str}) | pandas.read_csv |
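'''
If your file were less tidy, it's often worth being explicit about the header row and the index column too.
The snippet below is only an illustration (there is no 'messy.csv' in this project), so it's left commented out:
'''
# messy = pd.read_csv(os.path.join('data', 'messy.csv'),
#                     header=2,                 # column names live in the third row of the file
#                     index_col='countyFIPS',   # use the county FIPS code as the row index
#                     dtype={'countyFIPS': str})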
import warnings
import pandas as pd
import os
import shutil
from itertools import product
import glob
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
import tensorflow as tf
sess = tf.compat.v1.Session()
from tensorflow.keras.layers import Dense, LSTM, Input, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import Ones
from tensorflow.keras.models import Model
from tensorflow.keras.models import model_from_json
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters
try:
from functions import *
from configs import hyper_conf, accept_threshold_for_loss_diff, parameter_tuning_trials
from data_access import *
except Exception as e:
from .functions import *
from .configs import hyper_conf, accept_threshold_for_loss_diff, parameter_tuning_trials
from .data_access import *
def model_from_to_json(path=None, weights_path=None, model=None, is_writing=False):
if is_writing:
model_json = model.to_json()
with open(path, "w") as json_file:
json_file.write(model_json)
model.save_weights(weights_path)
else:
json_file = open(path, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights(weights_path)
return model
def updating_hyper_parameters_related_to_data():
return None
def get_params(params, comb):
count = 0
for p in params:
_p = type(params[p])(comb[count])
params[p] = _p
count += 1
return params
class TrainLSTMNewComers:
def __init__(self,
date=None,
time_indicator=None,
order_count=None,
data_source=None,
data_query_path=None,
time_period=None,
directory=None,
engaged_customers_results=pd.DataFrame(),
customer_indicator=None,
amount_indicator=None):
self.sess = tf.compat.v1.Session()
self.directory = directory
self.customer_indicator = customer_indicator
self.time_indicator = time_indicator
self.amount_indicator = amount_indicator
self.params = hyper_conf('newcomers') \
if check_for_existing_parameters(self.directory,'newcomers') is None else \
check_for_existing_parameters(self.directory, 'newcomers')
self.hyper_params = get_tuning_params(hyper_conf('newcomers_hyper'), self.params)
self.optimized_parameters = {}
self._p = None
self.order_count = order_count
self.time_period = time_period
self.engaged_customers_results = engaged_customers_results
self.data, self.features, \
self.average_amount, self.min_max = data_manipulation_nc(date=date,
order_count=order_count,
amount_indicator=amount_indicator,
time_indicator=time_indicator,
data_source=data_source,
data_query_path=data_query_path,
customer_indicator=customer_indicator,
directory=directory)
self.hp = HyperParameters()
self.model_data = {}
self.input, self.model = None, None
self.prev_model_date = check_model_exists(self.directory, "trained_newcomers_model", self.time_period)
self.residuals, self.anomaly = [], []
self.results = pd.DataFrame()
self.get_actual_value = lambda _min, _max, _value: ((_max - _min) * _value) + _min if _value >= 0 else _min
self.max_date = max(self.data[self.time_indicator])
self.future_date = self.max_date + datetime.timedelta(days=convert_time_preiod_to_days(self.time_period))
self.model_data = {"x_train": None, "y_train": None, "x_test": None, "y_test": None}
self.client_sample_sizes = []
self.optimum_batch_size = 8
def data_preparation(self):
self.model_data = arrange__data_for_model(df=self.data, f=[self.features], parameters=self.params)
def build_parameter_tuning_model(self, hp):
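# Model-builder for the keras-tuner hyperparameter search (RandomSearch is imported above):
# hp.Choice samples the LSTM width, activation and learning rate from the configured candidate lists.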
self.input = Input(shape=(self.model_data['x_train'].shape[1], 1))
lstm = LSTM(int(hp.Choice('units', self.hyper_params['units'])),
bias_initializer=Ones(),
kernel_initializer=Ones(),
use_bias=False,
activation=hp.Choice('activation', self.hyper_params['activation']),
dropout=0.1
)(self.input)
lstm = BatchNormalization()(lstm)
lstm = Dense(1)(lstm)
model = Model(inputs=self.input, outputs=lstm)
model.compile(loss='mae',
optimizer=Adam(lr=hp.Choice('lr', self.hyper_params['lr'])),
metrics=['mae'])
return model
def init_tf(self):
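# Close the previous session and re-import the Keras pieces so each model rebuild starts from a fresh TensorFlow session.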
self.sess.close()
import tensorflow as tf
self.sess = tf.compat.v1.Session()
from tensorflow.keras.layers import Dense, LSTM, Input, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import Ones
from tensorflow.keras.models import Model
from tensorflow.keras.models import model_from_json
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters
def build_model(self, prediction=False):
if prediction:
self.init_tf()
self.input = Input(shape=(self.model_data['x_train'].shape[1], 1))
# LSTM layer
lstm = LSTM(self.params['units'],
batch_size=self.params['batch_size'],
bias_initializer=Ones(),
kernel_initializer=Ones(),
use_bias=False,
recurrent_activation=self.params['activation'],
dropout=0.1
)(self.input)
lstm = BatchNormalization()(lstm)
lstm = Dense(1)(lstm)
self.model = Model(inputs=self.input, outputs=lstm)
self.model.compile(loss='mae', optimizer=Adam(lr=self.params['lr']), metrics=['mae'])
def learning_process(self, save_model=True, history=False, show_epochs=True):
verbose = 1 if show_epochs else 0
if history:
history = self.model.fit(self.model_data['x_train'],
self.model_data['y_train'],
batch_size=self.params['batch_size'],
epochs=int(self.params['epochs']),
verbose=1,  # always verbose when history=True is requested
validation_data=(self.model_data['x_test'], self.model_data['y_test']),
shuffle=True)
else:
if save_model:
print("*"*5, "Fit Newcomers CLV Model", "*"*5)
self.model.fit(self.model_data['x_train'],
self.model_data['y_train'],
batch_size=self.params['batch_size'],
epochs=int(self.params['epochs']),
verbose=verbose,
validation_data=(self.model_data['x_test'], self.model_data['y_test']),
shuffle=True)
if save_model:
model_from_to_json(path=model_path(self.directory,
"trained_newcomers_model",
get_current_day(),
self.time_period),
weights_path=weights_path(self.directory,
"trained_newcomers_model",
get_current_day(),
self.time_period),
model=self.model,
is_writing=True)
if history:
return history
def train_execute(self):
print("*"*5, "Newcomer CLV Prediction train model process ", "*"*5)
if self.prev_model_date is None:
self.data_preparation()
self.parameter_tuning()
self.build_model()
self.learning_process()
else:
self.model = model_from_to_json(path=model_path(self.directory,
"trained_newcomers_model",
self.prev_model_date,
self.time_period),
weights_path=weights_path(self.directory,
"trained_newcomers_model",
self.prev_model_date,
self.time_period))
print("Previous model already exits in arrange__data_for_model the given directory '" + self.directory + "'.")
def prediction_execute(self):
print("*"*5, "PREDICTION", 5*"*")
if self.model is None:
_model_date = self.prev_model_date if self.prev_model_date is not None else get_current_day()
self.model = model_from_to_json(path=model_path(self.directory,
"trained_newcomers_model",
_model_date,
self.time_period),
weights_path=weights_path(self.directory,
"trained_newcomers_model",
_model_date,
self.time_period))
# daily calculations, day by day
while self.max_date < self.future_date:
print("date :", self.max_date)
self.model_data = arrange__data_for_model(self.data, [self.features], self.params)
self.build_model(prediction=True)
self.learning_process(save_model=False, history=False, show_epochs=False)
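# Forecast a single day, then append the prediction to the history so the next iteration forecasts recursively on its own output.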
x = arrange__data_for_model(self.data, [self.features], self.params, is_prediction=True)
_pred = pd.DataFrame([{self.time_indicator: self.max_date, "order_count": self.model.predict(x)[0][-1]}])
self.data, self.results = pd.concat([self.data, _pred]), pd.concat([self.results, _pred])
self.max_date += datetime.timedelta(days=1)
del self.model_data, x, self.model
for i in ['min_' + self.features, 'max_' + self.features]:
self.results[i] = self.min_max[i]
self.results[self.features] = self.results.apply(
lambda row: self.get_actual_value(_min=row['min_' + self.features],
_max=row['max_' + self.features],
_value=row[self.features]), axis=1)
self.results[self.amount_indicator] = self.results[self.features] * self.average_amount
self.results[self.customer_indicator] = "newcomers"
self.results['data_type'] = "prediction"
self.results = self.results[['data_type', self.customer_indicator, self.time_indicator, self.amount_indicator]]
print("result file : ", get_result_data_path(self.directory, self.time_period, self.max_date))
| pd.concat([self.results, self.engaged_customers_results]) | pandas.concat |
import pandas as pd
import numpy as np
import pickle
import os
from librosa.core import load
from librosa.feature import melspectrogram
from librosa import power_to_db
from config import RAW_DATAPATH
class Data():
def __init__(self, genres, datapath):
self.raw_data = None
self.GENRES = genres
self.DATAPATH = datapath
print("\n-> Data() object is initialized.")
def make_raw_data(self):
records = list()
for i, genre in enumerate(self.GENRES):
GENREPATH = self.DATAPATH + genre + '/'
for j, track in enumerate(os.listdir(GENREPATH)):
TRACKPATH = GENREPATH + track
print("%d.%s\t\t%s (%d)" % (i + 1, genre, TRACKPATH, j + 1))
y, sr = load(TRACKPATH, mono=True)
S = melspectrogram(y, sr).T
# Trim to a whole number of 128-frame chunks so the split below is even
remainder = S.shape[0] % 128
if remainder:
    S = S[:-remainder]
num_chunk = S.shape[0] // 128
data_chunks = np.split(S, num_chunk)
data_chunks = [(data, genre) for data in data_chunks]
records.append(data_chunks)
records = [data for record in records for data in record]
self.raw_data = | pd.DataFrame.from_records(records, columns=['spectrogram', 'genre']) | pandas.DataFrame.from_records |
#!/usr/bin/env python
r"""Test :py:class:`~solarwindpy.core.ions.Ion`.
"""
import pdb
# import re as re
import numpy as np
import pandas as pd
import unittest
# import sys
# import itertools
# from numbers import Number
# from pandas import MultiIndex as MI
# import numpy.testing as npt
import pandas.testing as pdt
from abc import ABC, abstractproperty
# from abc import abstractmethod, abstractstaticmethod, abstractclassmethod
# from unittest import TestCase
from scipy import constants
from scipy.constants import physical_constants
# try:
# import test_base as base
# except ImportError:
# from . import test_base as base
from solarwindpy.tests import test_base as base
from solarwindpy import vector
from solarwindpy import tensor
from solarwindpy import ions
pd.set_option("mode.chained_assignment", "raise")
class IonTestBase(ABC):
@classmethod
def set_object_testing(cls):
# print(cls.__class__, "set_object_testing", flush=True)
# print("Data", cls.data, sep="\n")
data = cls.data.xs(cls().species, axis=1, level="S")
w = data.w
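# Scalar (isotropic-equivalent) thermal speed: w_scalar = sqrt((w_par**2 + 2 * w_per**2) / 3)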
coeff = pd.Series({"par": 1.0, "per": 2.0}) / 3.0
kwargs = dict(axis=1, level="C")
scalar = w.pow(2).multiply(coeff, **kwargs).sum(axis=1).pipe(np.sqrt)
scalar.name = ("w", "scalar")
data = pd.concat([data, scalar], axis=1, sort=True)
data.columns = pd.MultiIndex.from_tuples(data.columns, names=["M", "C"])
ion = ions.Ion(data, cls().species)
cls.object_testing = ion
cls.data = data
# print("Done with", cls.__class__, flush=True)
@abstractproperty
def species(self):
pass
@classmethod
def ion(cls):
return cls._ion
@property
def mass(self):
trans = {
"a": "alpha particle",
"p": "proton",
"p1": "proton",
"p2": "proton",
"e": "electron",
}
m = physical_constants["%s mass" % trans[self.species]][0]
return m
@property
def mass_in_mp(self):
trans = {
"a": physical_constants["alpha particle-proton mass ratio"][0],
"p": 1,
"p1": 1,
"p2": 1,
"e": physical_constants["electron-proton mass ratio"][0],
}
return trans[self.species]
def test_species(self):
self.assertEqual(self.species, self.object_testing.species)
def test_n(self):
n = self.data.loc[:, ("n", "")]
if not isinstance(n, pd.Series):
assert n.shape[1] == 1
n = n.iloc[:, 0]
n.name = "n"
ot = self.object_testing
pdt.assert_series_equal(n, ot.n)
pdt.assert_series_equal(n, ot.number_density)
pdt.assert_series_equal(ot.number_density, ot.n)
def test_mass_density(self):
mmp = self.mass_in_mp
rho = self.data.loc[:, pd.IndexSlice["n", ""]] * mmp
rho.name = self.species
if not isinstance(rho, pd.Series):
assert rho.shape[1] == 1
rho = rho.iloc[:, 0]
rho.name = "rho"
ot = self.object_testing
pdt.assert_series_equal(ot.rho, ot.mass_density)
pdt.assert_series_equal(rho, ot.rho)
pdt.assert_series_equal(rho, ot.mass_density)
def test_v(self):
v = vector.Vector(self.data.v)
ot = self.object_testing
self.assertEqual(v, ot.velocity)
self.assertEqual(v, ot.v)
self.assertEqual(ot.velocity, ot.v)
def test_w(self):
w = tensor.Tensor(self.data.w)
ot = self.object_testing
self.assertEqual(w, ot.thermal_speed)
self.assertEqual(w, ot.w)
self.assertEqual(ot.w, ot.thermal_speed)
def test_anisotropy(self):
w = self.data.w
ani = (w.per / w.par).pow(2)
ani.name = "RT"
ot = self.object_testing
| pdt.assert_series_equal(ani, ot.anisotropy) | pandas.testing.assert_series_equal |
from multiprocessing.pool import Pool
from typing import Tuple
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from tensorflow.keras import Input
from tensorflow.keras.layers import Embedding, Lambda, Flatten, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.python.distribute.distribute_lib import Strategy
from src.models.RModel import RModel
class BPRModel(RModel):
def __init__(self):
super().__init__('BPRModel')
self._trainDf: DataFrame = None
self._productIds: list = []
self._results: list = []
@property
def results(self) -> list:
return self._results
@results.setter
def results(self, value: list):
self._results = value
@property
def productIds(self) -> list:
return self._productIds
@productIds.setter
def productIds(self, value: list):
self._productIds = value
@property
def trainDf(self) -> DataFrame:
return self._trainDf
@trainDf.setter
def trainDf(self, value: DataFrame):
self._trainDf = value
def compileModel(self, distributedConfig, numUser:int, numItem:int, numFactor:int) -> Tuple[Model, Strategy]:
userInput = Input((1,), name='customerId_input')
positiveItemInput = Input((1,), name='pProduct_input')
negativeItemInput = Input((1,), name='nProduct_input')
# One embedding layer is shared between positive and negative items
itemEmbeddingLayer = Embedding(numItem, numFactor, name='item_embedding', input_length=1)
positiveItemEmbedding = Flatten()(itemEmbeddingLayer(positiveItemInput))
negativeItemEmbedding = Flatten()(itemEmbeddingLayer(negativeItemInput))
userEmbedding = Embedding(numUser, numFactor, name='user_embedding', input_length=1)(userInput)
userEmbedding = Flatten()(userEmbedding)
tripletLoss = Lambda(self.bprTripletLoss, output_shape=self.outShape)([userEmbedding, positiveItemEmbedding, negativeItemEmbedding])
# loss = merge([positiveItemEmbedding, negativeItemEmbedding, userEmbedding], mode=self.bprTripletLoss, name='loss', output_shape=(1,))
self.model = Model(inputs=[userInput, positiveItemInput, negativeItemInput], outputs=tripletLoss)
# manual loss function
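# The network's output is already the BPR triplet loss computed in the Lambda layer,
# so identityLoss just passes it straight through to the optimizer.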
self.model.compile(loss=self.identityLoss, optimizer=Adam(1e-3))
# self.model.compile(Adam(1e-3), loss='mean_squared_error', metrics=RModel.METRICS)
return self.model, None
def train(self, path, rowLimit, metricDict:dict = None, distributedConfig=None):
self.batchSize = 64
numItem, numUser, transactionDf = self.readData(path, rowLimit)
# transactionDf.CUSTOMER_ID = transactionDf.CUSTOMER_ID.astype('category')
# transactionDf.CUSTOMER_ID = transactionDf.CUSTOMER_ID.cat.codes
# transactionDf.PRODUCT_ID = transactionDf.PRODUCT_ID.astype('category')
# transactionDf.PRODUCT_ID = transactionDf.PRODUCT_ID.cat.codes
self.trainDf, test = train_test_split(transactionDf, test_size=self.testSize)
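# Build (customer, positive item, negative item) triplets from the training split for pairwise ranking.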
dfTriplets = | pd.DataFrame(columns=['CUSTOMER_ID', 'pPRODUCT_ID', 'nPRODUCT_ID']) | pandas.DataFrame |
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
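# Offset in seconds of the target time within the first day; with 1-second frequency the matches repeat every 24*3600 rows.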
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1],
freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assert_raises_regex(ValueError,
'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
| tm.assert_numpy_array_equal(arr, exp_arr) | pandas.util.testing.assert_numpy_array_equal |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 08:47:38 2018
@author: cenv0574
"""
import os
import json
import pandas as pd
import geopandas as gpd
from itertools import product
def load_config():
# Define current directory and data directory
config_path = os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'config.json')
)
with open(config_path, 'r') as config_fh:
config = json.load(config_fh)
return config
def load_table(data_path):
vnm_IO_path = os.path.join(data_path,"INPUT-OUTPUT TABLE 2012","IO Table 2012 English.xlsx")
return pd.read_excel(vnm_IO_path,sheet_name='IO_clean',index_col=0)
def load_sectors(data_path):
vnm_IO_path = os.path.join(data_path,"INPUT-OUTPUT TABLE 2012","IO Table 2012 English.xlsx")
vnmIO_rowcol = pd.read_excel(vnm_IO_path,sheet_name='SectorName')
return vnmIO_rowcol
def get_final_sector_classification():
return ['secA','secB','secC','secD','secE','secF','secG','secH','secI']
def map_sectors(vnm_IO_rowcol):
row_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains("row") | vnm_IO_rowcol['mapped'].str.contains("sec") ]
col_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains("col") | vnm_IO_rowcol['mapped'].str.contains("sec") ]
return dict(zip(row_only.code,row_only.mapped)),dict(zip(col_only.code,col_only.mapped))
def aggregate_table(vnm_IO,vnm_IO_rowcol,in_million=True):
sectors = get_final_sector_classification()
#aggregate table
mapper_row,mapper_col = map_sectors(vnm_IO_rowcol)
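# Map the original IO row/column codes onto the condensed sector classification, then collapse duplicates by summing.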
vnm_IO.index = vnm_IO.index.map(mapper_row.get)
vnm_IO.columns = vnm_IO.columns.to_series().map(mapper_col)
aggregated = vnm_IO.groupby(vnm_IO.index,axis=0).sum().groupby(vnm_IO.columns, axis=1).sum()
aggregated = aggregated.reindex(sectors+['col1','col2','col3'],axis='columns')
aggregated = aggregated.reindex(sectors+['row1','row2','row3'],axis='index')
if in_million == True:
return aggregated/1000000
else:
return aggregated
def is_balanced(io_table):
row = io_table.sum(axis=0)
col = io_table.sum(axis=1)
if (row - col).abs().sum() < 1:
print('Table is balanced')
def load_provincial_stats(data_path):
prov_path = os.path.join(data_path,'Vietnam_boundaries','boundaries_stats','province_level_stats.shp')
return gpd.read_file(prov_path)
def estimate_gva(regions,in_million=True):
if in_million == True:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital))/1000000)
else:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital)))
def create_proxies(data_path,notrade=False,own_production_ratio=0.9,min_rice=True):
provinces = load_provincial_stats(data_path)
provinces.name_eng = provinces.name_eng.apply(lambda x: x.replace(' ','_').replace('-','_'))
od_table = load_od(data_path,min_rice=min_rice)
create_indices(data_path,provinces,write_to_csv=True)
create_regional_proxy(data_path,provinces,write_to_csv=True)
create_sector_proxies(data_path,provinces,write_to_csv=True)
create_zero_proxies(data_path,od_table,notrade=notrade,write_to_csv=True)
if notrade == False:
create_level14_proxies(data_path,od_table,own_production_ratio,write_to_csv=True)
def create_regional_proxy(data_path,regions,write_to_csv=True):
regions['raw_gva'] = estimate_gva(regions) #regions['pro_nfirm']*regions['laborcost'] + regions['pro_nfirm']*regions['capital']
subset = regions.loc[:,['name_eng','raw_gva']]
subset['year'] = 2010
subset['raw_gva'] = subset.raw_gva.apply(int)/(subset['raw_gva'].sum(axis='index'))
subset = subset[['year','name_eng','raw_gva']]
subset.columns = ['year','id','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_reg_vnm.csv')
subset.to_csv(csv_path,index=False)
def create_indices(data_path,provinces,write_to_csv=True):
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('row'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names] for item in sublist]
indices = pd.DataFrame([region_names_list,rows]).T
indices.columns = ['region','sector']
indices['sector'] = indices['sector'].apply(lambda x: x.replace('row','other'))
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','indices_mrio.csv')
indices.to_csv(csv_path,index=False)
def create_sector_proxies(data_path,regions,write_to_csv=True):
#list of sectors
sector_list = get_final_sector_classification()
#get own sector classification for region file
map_dict = map_sect_vnm_to_eng()
regions=regions.rename(columns = map_dict)
# get sectoral gva based on proportion of firms in the region
sector_shares = regions[sector_list].multiply(regions['raw_gva'],axis='index')
sector_shares.index = regions.name_eng
for sector in sector_list+['other1','other2','other3']:
if sector in ['other1','other2','other3']:
subset = pd.DataFrame(sector_shares.sum(axis='columns')).divide(pd.DataFrame(sector_shares.sum(axis='columns')).sum(axis='index'))
subset.columns = [sector]
else:
subset = pd.DataFrame(sector_shares.loc[:,sector]).divide(pd.DataFrame(sector_shares.loc[:,sector]).sum(axis='index'))
subset.reset_index(inplace=True,drop=False)
subset['year'] = 2010
subset['sector'] = sector+str(1)
subset[sector] = subset[sector].apply(lambda x: round(x,7))
subset = subset[['year','sector','name_eng',sector]]
subset.columns = ['year','sector','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_{}.csv'.format(sector))
subset.to_csv(csv_path,index=False)
def get_trade_value(x,sum_use,sector,own_production_ratio=0.9):
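# Intra-regional demand keeps own_production_ratio of a sector's total use;
# the remainder is split across other origins in proportion to their OD-flow share.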
if x.Destination == x.Origin:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*own_production_ratio
except:
return 1
elif x.gdp == 0:
return 0
else:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*(1-own_production_ratio)*x.ratio
except:
return 0
def create_level14_proxies(data_path,od_table,own_production_ratio=0.9,write_to_csv=True):
# get sector list
sector_list_ini = get_final_sector_classification()+['other1','other2','other3']
sector_list = [x+str(1) for x in sector_list_ini]
od_table.loc[od_table['Destination'] == od_table['Origin'],'gdp'] = 10
od_sum = pd.DataFrame(od_table.groupby(['Destination','Origin']).sum().sum(axis=1))
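# Within each destination, compute every origin's share of the total inflow; used below to allocate the non-locally-produced demand.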
od_sum['ratio'] = od_sum.groupby(level=0).apply(lambda x:
x / float(x.sum()))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination','Origin','gdp','ratio']
df_pretable = pd.read_csv(os.path.join(data_path,'IO_analysis','MRIO_TABLE','notrade_trade.csv'),index_col=[0,1],header=[0,1])
df_pretable = df_pretable.iloc[:,:567]
sum_use = df_pretable.sum(axis=1)
sum_use = pd.DataFrame(sum_use*0.1)
sum_use.reset_index(inplace=True)
sum_use.columns = ['region','sector','value']
combine = []
for sector in sector_list:
if sector[:-1] in ['other1','other2','other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
subset.drop('ratio',axis=1,inplace=True)
combine.append(subset)
else:
subset = od_sum.copy()
subset = subset.loc[od_sum.gdp != 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = subset.apply(lambda x: get_trade_value(x,sum_use,sector[:-1],own_production_ratio),axis=1) #subset['gdp'].apply(lambda x: round(x,2))
subset.drop('ratio',axis=1,inplace=True)
combine.append(subset)
all_ = pd.concat(combine)
final_sub = all_[['year','sector','Origin','Destination','gdp']]
final_sub.columns = ['year','sector','region','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_trade14_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path,index=False)
def create_zero_proxies(data_path,od_table,notrade=False,write_to_csv=True):
# get sector list
sector_list = get_final_sector_classification()+['other1','other2','other3']
sector_list = [x+str(1) for x in sector_list]
#map sectors to be the same
mapper = map_regions()
od_table['Destination'] = od_table['Destination'].apply(lambda x: mapper[x])
od_table['Origin'] = od_table['Origin'].apply(lambda x: mapper[x])
od_table = od_table.loc[od_table['Destination'] != od_table['Origin']]
od_sum = pd.DataFrame(od_table.groupby(['Destination','Origin']).sum().sum(axis=1))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination','Origin','gdp']
if notrade == True:
od_sum['gdp'] = 0
for sector in sector_list:
if sector[:-1] in ['other1','other2','other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
else:
subset = od_sum.copy()
if notrade == False:
subset = subset.loc[od_sum.gdp == 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
all_ = pd.concat(combine)
final_sub = all_[['year','sector','Origin','subsector','Destination','gdp']]
final_sub.columns = ['year','sector','region','sector','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_trade_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path,index=False)
def load_output(data_path,provinces,notrade=True):
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('row'))]*len(region_names)
cols = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('col'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names] for item in sublist]
index_mi = pd.MultiIndex.from_arrays([region_names_list,rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list,cols], names=('region', 'col'))
# read output
if notrade == True:
output_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','output_notrade.csv')
else:
output_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','output.csv')
output_df = pd.read_csv(output_path,header=None)
output_df.index = index_mi
output_df.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in rowcol_names if x.startswith('sec')]*len(region_names)
col_only = [x for x in rowcol_names if x.startswith('col')]*len(region_names)
region_col = [item for sublist in [[x]*9 for x in region_names] for item in sublist] + [item for sublist in [[x]*3 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays([region_col,sector_only+col_only], names=('region', 'col'))
#sum va and imports
tax_sub = output_df.loc[output_df.index.get_level_values(1)=='row1'].sum(axis='index')
import_ = output_df.loc[output_df.index.get_level_values(1)=='row2'].sum(axis='index')
valueA = output_df.loc[output_df.index.get_level_values(1)=='row3'].sum(axis='index')
output_new = pd.concat([output_df.loc[~output_df.index.get_level_values(1).isin(['row1','row2','row3'])],pd.DataFrame(tax_sub).T,
pd.DataFrame(import_).T, | pd.DataFrame(valueA) | pandas.DataFrame |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import os
from pathlib import Path
library_path = str(Path(__file__).parent.parent.parent)
PYPATH = os.environ.get("PYTHONPATH", "").split(":")
if library_path not in PYPATH:
PYPATH.append(library_path)
os.environ["PYTHONPATH"] = ":".join(PYPATH)
from model_drift.data.padchest import PadChest
from model_drift.data.padchest import LABEL_MAP
from model_drift.drift.metrics.sampler import Sampler
from model_drift.drift.metrics.performance import ClassificationReportCalculator
from model_drift.drift.metrics import ChiSqDriftCalculator
from model_drift.drift.metrics.numeric import KSDriftCalculator, BasicDriftCalculator
from model_drift.drift.metrics import TabularDriftCalculator
from model_drift import settings, helpers
import warnings
import pandas as pd
import numpy as np
import argparse
logger = helpers.basic_logging()
def create_ood_dataframe(outside_data, pct, counts, start_date=None, end_date=None, shuffle=False):
# print(counts.index.min(), counts.index.max())
if start_date is None:
start_date = counts.index.min()
if end_date is None:
end_date = counts.index.max()
inject_index = pd.date_range(start_date, end_date, freq='D')
cl = helpers.CycleList(outside_data.index, shuffle=shuffle)
new_df = {}
counts = (counts * pct).apply(np.round).reindex(inject_index).fillna(0).astype(int)
for new_ix, count in counts.items():
ixes = cl.take(int(count))
new_df[new_ix] = outside_data.loc[ixes]
return | pd.concat(new_df, axis=0) | pandas.concat |
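# Illustrative usage sketch (hypothetical inputs, not part of the pipeline
# above): create_ood_dataframe cycles through an "outside" dataframe and
# injects a fixed fraction of each day's sample count, e.g.
#
#   daily_counts = pd.Series(100, index=pd.date_range("2020-01-01", periods=30, freq="D"))
#   injected = create_ood_dataframe(outside_df, pct=0.25, counts=daily_counts)
#
# which draws roughly 25 outside rows per day over the 30-day window.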
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/6/16 15:28
Desc: Eastmoney Data Center - Featured Data - Qian Gu Qian Ping (per-stock comments)
http://data.eastmoney.com/stockcomment/
"""
from datetime import datetime
import pandas as pd
import requests
from tqdm import tqdm
def stock_comment_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - per-stock comments (Qian Gu Qian Ping)
    http://data.eastmoney.com/stockcomment/
    :return: per-stock comment data
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "SECURITY_CODE",
"sortTypes": "1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_DMSK_TS_STOCKNEW",
"quoteColumns": "f2~01~SECURITY_CODE~CLOSE_PRICE,f8~01~SECURITY_CODE~TURNOVERRATE,f3~01~SECURITY_CODE~CHANGE_RATE,f9~01~SECURITY_CODE~PE_DYNAMIC",
"columns": "ALL",
"filter": "",
"token": "<KEY>",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"代码",
"-",
"交易日",
"名称",
"-",
"-",
"-",
"最新价",
"涨跌幅",
"-",
"换手率",
"主力成本",
"市盈率",
"-",
"-",
"机构参与度",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"综合得分",
"上升",
"目前排名",
"关注指数",
"-",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"换手率",
"市盈率",
"主力成本",
"机构参与度",
"综合得分",
"上升",
"目前排名",
"关注指数",
"交易日",
]
]
big_df["最新价"] = pd.to_numeric(big_df["最新价"], errors="coerce")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"], errors="coerce")
big_df["换手率"] = pd.to_numeric(big_df["换手率"], errors="coerce")
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"], errors="coerce")
big_df["主力成本"] = pd.to_numeric(big_df["主力成本"], errors="coerce")
big_df["机构参与度"] = pd.to_numeric(big_df["机构参与度"], errors="coerce")
big_df["综合得分"] = pd.to_numeric(big_df["综合得分"], errors="coerce")
big_df["上升"] = pd.to_numeric(big_df["上升"], errors="coerce")
big_df["目前排名"] = pd.to_numeric(big_df["目前排名"], errors="coerce")
big_df["关注指数"] = pd.to_numeric(big_df["关注指数"], errors="coerce")
big_df["交易日"] = pd.to_datetime(big_df["交易日"]).dt.date
return big_df
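# Illustrative usage sketch (requires network access; the exact rows depend on
# the live Eastmoney endpoint):
#
#   stock_comment_em_df = stock_comment_em()
#   print(stock_comment_em_df.head())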
def stock_comment_detail_zlkp_jgcyd_em(symbol: str = "600000") -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - per-stock comments - main-capital control - institutional participation
    https://data.eastmoney.com/stockcomment/stock/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: institutional participation data
:rtype: pandas.DataFrame
"""
url = f"https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"reportName": "RPT_DMSK_TS_STOCKEVALUATE",
"filter": f'(SECURITY_CODE="{symbol}")',
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"sortColumns": "TRADE_DATE",
"sortTypes": "-1",
"_": "1655387358195",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df = temp_df[["TRADE_DATE", "ORG_PARTICIPATE"]]
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df.sort_values(["date"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["value"] = pd.to_numeric(temp_df["value"]) * 100
return temp_df
def stock_comment_detail_zhpj_lspf_em(symbol: str = "600000") -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - per-stock comments - comprehensive evaluation - historical score
    https://data.eastmoney.com/stockcomment/stock/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: historical score data
:rtype: pandas.DataFrame
"""
url = f"https://data.eastmoney.com/stockcomment/api/{symbol}.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["ApiResults"]["zhpj"]["HistoryScore"]["XData"],
data_json["ApiResults"]["zhpj"]["HistoryScore"]["Ydata"]["Score"],
data_json["ApiResults"]["zhpj"]["HistoryScore"]["Ydata"]["Price"],
]
).T
temp_df.columns = ["日期", "评分", "股价"]
temp_df["日期"] = str(datetime.now().year) + "-" + temp_df["日期"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df.sort_values(["日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["评分"] = pd.to_numeric(temp_df["评分"])
temp_df["股价"] = pd.to_numeric(temp_df["股价"])
return temp_df
def stock_comment_detail_scrd_focus_em(symbol: str = "600000") -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - per-stock comments - market heat - user attention index
    https://data.eastmoney.com/stockcomment/stock/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: user attention index data
:rtype: pandas.DataFrame
"""
url = f"https://data.eastmoney.com/stockcomment/api/{symbol}.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["ApiResults"]["scrd"]["focus"][1]["XData"],
data_json["ApiResults"]["scrd"]["focus"][1]["Ydata"]["StockFocus"],
data_json["ApiResults"]["scrd"]["focus"][1]["Ydata"]["ClosePrice"],
]
).T
temp_df.columns = ["日期", "用户关注指数", "收盘价"]
temp_df["日期"] = str(datetime.now().year) + "-" + temp_df["日期"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df.sort_values(["日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["用户关注指数"] = pd.to_numeric(temp_df["用户关注指数"])
temp_df["收盘价"] = pd.to_numeric(temp_df["收盘价"])
return temp_df
def stock_comment_detail_scrd_desire_em(
symbol: str = "600000",
) -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - per-stock comments - market heat - market participation willingness
    https://data.eastmoney.com/stockcomment/stock/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: market participation willingness data
:rtype: pandas.DataFrame
"""
url = f"https://data.eastmoney.com/stockcomment/api/{symbol}.json"
r = requests.get(url)
data_json = r.json()
date_str = (
data_json["ApiResults"]["scrd"]["desire"][0][0]["UpdateTime"]
.split(" ")[0]
.replace("/", "-")
)
temp_df = pd.DataFrame(
[
data_json["ApiResults"]["scrd"]["desire"][1]["XData"],
data_json["ApiResults"]["scrd"]["desire"][1]["Ydata"][
"MajorPeopleNumChg"
],
data_json["ApiResults"]["scrd"]["desire"][1]["Ydata"][
"PeopleNumChange"
],
data_json["ApiResults"]["scrd"]["desire"][1]["Ydata"][
"RetailPeopleNumChg"
],
]
).T
temp_df.columns = ["日期时间", "大户", "全部", "散户"]
temp_df["日期时间"] = date_str + " " + temp_df["日期时间"]
temp_df["日期时间"] = pd.to_datetime(temp_df["日期时间"])
temp_df.sort_values(["日期时间"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["大户"] = pd.t | o_numeric(temp_df["大户"]) | pandas.to_numeric |
import networkx as nx
import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize
from .build_network import *
def character_density(book_path):
'''
number of central characters divided by the total number of words in a novel
Parameters
----------
book_path : string (required)
path to txt file containing full text of book to be analysed
Returns
-------
density : float
number of characters in book / number of words in book
'''
book = load_book(book_path)
book_length = len(word_tokenize(book))
book_graph = nx.from_pandas_dataframe(bookworm(book_path),
source='source',
target='target')
n_characters = len(book_graph.nodes())
return n_characters / book_length
def split_book(book, n_sections=10, cumulative=True):
'''
Split a book into n equal parts, with optional cumulative aggregation
Parameters
----------
book : string (required)
the book to be split
    n_sections : int (optional)
the number of sections which we want to split our book into
cumulative : bool (optional)
If true, the returned sections will be cumulative, ie all
will start at the book's beginning and end at evenly distributed
points throughout the book
Returns
-------
split_book : list
the given book split into the specified number of even (or, if
cumulative is set to True, uneven) sections
'''
book_sequences = get_sentence_sequences(book)
split_book = np.array_split(np.array(book_sequences), n_sections)
if cumulative is True:
split_book = [np.concatenate(split_book[:pos + 1])
for pos, section in enumerate(split_book)]
return split_book
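# Worked example (illustrative): splitting 8 sentences into 4 sections yields
# [s1-s2], [s3-s4], [s5-s6], [s7-s8]; with cumulative=True the sections become
# [s1-s2], [s1-s4], [s1-s6], [s1-s8], i.e. each section restarts at the
# beginning of the book and ends at an evenly spaced point.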
def chronological_network(book_path, n_sections=10, cumulative=True):
'''
Split a book into n equal parts, with optional cumulative aggregation, and
return a dictionary of assembled character graphs
Parameters
----------
book_path : string (required)
path to the .txt file containing the book to be split
    n_sections : int (optional)
the number of sections which we want to split our book into
cumulative : bool (optional)
If true, the returned sections will be cumulative, ie all will start at
the book's beginning and end at evenly distributed points throughout
the book
Returns
-------
graph_dict : dict
a dictionary containing the graphs of each split book section
keys = section index
values = nx.Graph describing the character graph in the specified book
section
'''
book = load_book(book_path)
sections = split_book(book, n_sections, cumulative)
graph_dict = {}
for i, section in enumerate(sections):
characters = extract_character_names(' '.join(section))
df = find_connections(sequences=section, characters=characters)
cooccurence = calculate_cooccurence(df)
interaction_df = get_interaction_df(cooccurence, threshold=2)
graph_dict[i] = nx.from_pandas_dataframe(interaction_df,
source='source',
target='target')
return graph_dict
def select_k(spectrum):
'''
    Returns k, where the top k eigenvalues of the graph's laplacian spectrum
    describe 90 percent of the graph's complexity.
    Parameters
    ----------
    spectrum : array-like (required)
the laplacian spectrum of the graph in question
Returns
-------
k : int
denotes the top k eigenvalues of the graph's laplacian spectrum,
explaining 90 percent of its complexity (or containing 90 percent of
its energy)
'''
if sum(spectrum) == 0:
return len(spectrum)
running_total = 0
for i in range(len(spectrum)):
running_total += spectrum[i]
if (running_total / sum(spectrum)) >= 0.9:
return i + 1
return len(spectrum)
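# Worked example (illustrative): for a spectrum [5, 3, 1, 0.5, 0.5] the running
# totals are 5, 8, 9, ... with a grand total of 10, so 90 percent of the energy
# is first reached at the third eigenvalue and select_k returns 3.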
def graph_similarity(graph_1, graph_2):
'''
Computes the similarity of two graphs based on their laplacian spectra,
returning a value between 0 and inf where a score closer to 0 is indicative
    of a more similar pair of networks
Parameters
----------
graph_1 : networkx.Graph (required)
graph_2 : networkx.Graph (required)
Returns
-------
similarity : float
the similarity score of the two graphs where a value closer to 0 is
indicative of a more similar pair of networks
'''
laplacian_1 = nx.spectrum.laplacian_spectrum(graph_1)
laplacian_2 = nx.spectrum.laplacian_spectrum(graph_2)
k_1 = select_k(laplacian_1)
k_2 = select_k(laplacian_2)
k = min(k_1, k_2)
return sum((laplacian_1[:k] - laplacian_2[:k])**2)
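# Illustrative sketch (not used by the functions below): identical graphs score
# exactly 0, and the score grows as the two laplacian spectra diverge.
def _graph_similarity_example():
    path = nx.path_graph(5)
    cycle = nx.cycle_graph(5)
    same = graph_similarity(path, path)        # 0.0 -- identical spectra
    different = graph_similarity(path, cycle)  # > 0 -- spectra diverge
    return same, different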
def comparison_df(graph_dict):
'''
    takes an assortment of novels and computes their similarity, based on their
laplacian spectra
Parameters
----------
graph_dict : dict (required)
keys = book title
values = character graph
Returns
-------
comparison : pandas.DataFrame
columns = book titles
indexes = book titles
values = measure of the character graph similarity of books
'''
books = list(graph_dict.keys())
comparison = {book_1: {book_2: graph_similarity(graph_dict[book_1],
graph_dict[book_2])
for book_2 in books} for book_1 in books}
return | pd.DataFrame(comparison) | pandas.DataFrame |
import numpy as np
np.random.seed(0)
import pandas as pd
def highlight_nan(data: pd.DataFrame, color: str) -> pd.DataFrame:
attr = f'background-color: {color}'
is_nan = pd.isna(data)
return pd.DataFrame(
np.where(is_nan, attr, ''),
index=data.index,
columns=data.columns
)
def df_info(df: pd.DataFrame) -> "pd.io.formats.style.Styler":
return df.style.apply(
highlight_nan,
color='darkorange',
axis=None
)
if __name__ == '__main__':
data = np.array([1, np.nan, 3, 4])
print("Sum of data is", np.nansum(data))
df = pd.DataFrame(np.random.randn(5, 3), index=["a", "b", "c", "f", "h"], columns=["one", "two", "three"])
df["one"]["a"] = None
df["two"]["f"] = None
print(pd.isna(df))
print("\nCleared df:\n", df.dropna(axis="rows"))
print("Sum of column two in df is", df["two"].sum())
num_samples = 100
index = | pd.date_range("21/4/2021", periods=num_samples) | pandas.date_range |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from dask.utils import raises
import dask.dataframe as dd
from dask.dataframe.utils import eq, assert_dask_graph
def test_groupby_internal_repr():
pdf = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7, 8, 9, 10],
'y': list('abcbabbcda')})
ddf = dd.from_pandas(pdf, 3)
gp = pdf.groupby('y')
dp = ddf.groupby('y')
assert isinstance(dp, dd.groupby.DataFrameGroupBy)
assert isinstance(dp._pd, pd.core.groupby.DataFrameGroupBy)
assert isinstance(dp.obj, dd.DataFrame)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby('y')['x']
dp = ddf.groupby('y')['x']
assert isinstance(dp, dd.groupby.SeriesGroupBy)
assert isinstance(dp._pd, pd.core.groupby.SeriesGroupBy)
    # slicing should not affect the internal state
assert isinstance(dp.obj, dd.Series)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby('y')[['x']]
dp = ddf.groupby('y')[['x']]
assert isinstance(dp, dd.groupby.DataFrameGroupBy)
assert isinstance(dp._pd, pd.core.groupby.DataFrameGroupBy)
    # slicing should not affect the internal state
assert isinstance(dp.obj, dd.DataFrame)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby(pdf.y)['x']
dp = ddf.groupby(ddf.y)['x']
assert isinstance(dp, dd.groupby.SeriesGroupBy)
assert isinstance(dp._pd, pd.core.groupby.SeriesGroupBy)
    # slicing should not affect the internal state
assert isinstance(dp.obj, dd.Series)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby(pdf.y)[['x']]
dp = ddf.groupby(ddf.y)[['x']]
assert isinstance(dp, dd.groupby.DataFrameGroupBy)
assert isinstance(dp._pd, pd.core.groupby.DataFrameGroupBy)
    # slicing should not affect the internal state
assert isinstance(dp.obj, dd.DataFrame)
assert eq(dp.obj, gp.obj)
def test_groupby_error():
pdf = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7, 8, 9, 10],
'y': list('abcbabbcda')})
ddf = dd.from_pandas(pdf, 3)
with tm.assertRaises(KeyError):
ddf.groupby('A')
with tm.assertRaises(KeyError):
ddf.groupby(['x', 'A'])
dp = ddf.groupby('y')
msg = 'Column not found: '
with tm.assertRaisesRegexp(KeyError, msg):
dp['A']
with tm.assertRaisesRegexp(KeyError, msg):
dp[['x', 'A']]
def test_groupby_internal_head():
pdf = pd.DataFrame({'A': [1, 2] * 10,
'B': np.random.randn(20),
'C': np.random.randn(20)})
ddf = dd.from_pandas(pdf, 3)
assert eq(ddf.groupby('A')._head().sum(),
pdf.head().groupby('A').sum())
assert eq(ddf.groupby(ddf['A'])._head().sum(),
pdf.head().groupby(pdf['A']).sum())
assert eq(ddf.groupby(ddf['A'] + 1)._head().sum(),
pdf.head().groupby(pdf['A'] + 1).sum())
def test_full_groupby():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
assert raises(Exception, lambda: d.groupby('does_not_exist'))
assert raises(Exception, lambda: d.groupby('a').does_not_exist)
assert 'b' in dir(d.groupby('a'))
def func(df):
df['b'] = df.b - df.b.mean()
return df
assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
def test_groupby_dir():
df = pd.DataFrame({'a': range(10), 'b c d e': range(10)})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby('a')
assert 'a' in dir(g)
assert 'b c d e' not in dir(g)
def test_groupby_on_index():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
e = d.set_index('a')
efull = full.set_index('a')
assert eq(d.groupby('a').b.mean(), e.groupby(e.index).b.mean())
def func(df):
df.loc[:, 'b'] = df.b - df.b.mean()
return df
assert eq(d.groupby('a').apply(func).set_index('a'),
e.groupby(e.index).apply(func))
assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
assert eq(d.groupby('a').apply(func).set_index('a'),
full.groupby('a').apply(func).set_index('a'))
assert eq(efull.groupby(efull.index).apply(func),
e.groupby(e.index).apply(func))
def test_groupby_multilevel_getitem():
df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
'b': [1, 2, 1, 4, 2, 1],
'c': [1, 3, 2, 1, 1, 2],
'd': [1, 2, 1, 1, 2, 2]})
ddf = dd.from_pandas(df, 2)
cases = [(ddf.groupby('a')['b'], df.groupby('a')['b']),
(ddf.groupby(['a', 'b']), df.groupby(['a', 'b'])),
(ddf.groupby(['a', 'b'])['c'], df.groupby(['a', 'b'])['c']),
(ddf.groupby('a')[['b', 'c']], df.groupby('a')[['b', 'c']]),
(ddf.groupby('a')[['b']], df.groupby('a')[['b']]),
(ddf.groupby(['a', 'b', 'c']), df.groupby(['a', 'b', 'c']))]
for d, p in cases:
assert isinstance(d, dd.groupby._GroupBy)
assert isinstance(p, pd.core.groupby.GroupBy)
assert eq(d.sum(), p.sum())
assert eq(d.min(), p.min())
assert eq(d.max(), p.max())
assert eq(d.count(), p.count())
assert eq(d.mean(), p.mean().astype(float))
def test_groupby_multilevel_agg():
df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
'b': [1, 2, 1, 4, 2, 1],
'c': [1, 3, 2, 1, 1, 2],
'd': [1, 2, 1, 1, 2, 2]})
ddf = dd.from_pandas(df, 2)
sol = df.groupby(['a']).mean()
res = ddf.groupby(['a']).mean()
assert eq(res, sol)
sol = df.groupby(['a', 'c']).mean()
res = ddf.groupby(['a', 'c']).mean()
assert eq(res, sol)
def test_groupby_get_group():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 2, 6], 'b': [3, 3, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
(d.b + 1, full.b + 1)]:
ddgrouped = d.groupby(ddkey)
pdgrouped = full.groupby(pdkey)
# DataFrame
assert eq(ddgrouped.get_group(2), pdgrouped.get_group(2))
assert eq(ddgrouped.get_group(3), pdgrouped.get_group(3))
# Series
assert eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))
assert eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))
def test_dataframe_groupby_nunique():
strings = list('aaabbccccdddeee')
data = np.random.randn(len(strings))
ps = pd.DataFrame(dict(strings=strings, data=data))
s = dd.from_pandas(ps, npartitions=3)
expected = ps.groupby('strings')['data'].nunique()
assert eq(s.groupby('strings')['data'].nunique(), expected)
def test_dataframe_groupby_nunique_across_group_same_value():
strings = list('aaabbccccdddeee')
data = list(map(int, '123111223323412'))
ps = pd.DataFrame(dict(strings=strings, data=data))
s = dd.from_pandas(ps, npartitions=3)
expected = ps.groupby('strings')['data'].nunique()
assert eq(s.groupby('strings')['data'].nunique(), expected)
def test_series_groupby_propagates_names():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
ddf = dd.from_pandas(df, 2)
func = lambda df: df['y'].sum()
result = ddf.groupby('x').apply(func, columns='y')
expected = df.groupby('x').apply(func)
expected.name = 'y'
assert eq(result, expected)
def test_series_groupby():
s = pd.Series([1, 2, 2, 1, 1])
pd_group = s.groupby(s)
ss = dd.from_pandas(s, npartitions=2)
dask_group = ss.groupby(ss)
pd_group2 = s.groupby(s + 1)
dask_group2 = ss.groupby(ss + 1)
    for dg, pdg in [(dask_group, pd_group), (dask_group2, pd_group2)]:
assert eq(dg.count(), pdg.count())
assert eq(dg.sum(), pdg.sum())
assert eq(dg.min(), pdg.min())
assert eq(dg.max(), pdg.max())
def test_series_groupby_errors():
s = pd.Series([1, 2, 2, 1, 1])
ss = dd.from_pandas(s, npartitions=2)
msg = "Grouper for '1' not 1-dimensional"
with tm.assertRaisesRegexp(ValueError, msg):
s.groupby([1, 2]) # pandas
with tm.assertRaisesRegexp(ValueError, msg):
ss.groupby([1, 2]) # dask should raise the same error
msg = "Grouper for '2' not 1-dimensional"
with tm.assertRaisesRegexp(ValueError, msg):
s.groupby([2]) # pandas
with | tm.assertRaisesRegexp(ValueError, msg) | pandas.util.testing.assertRaisesRegexp |
''' ALL DIFFERENT MILP MODELS IN ONE FILE
1. MILP WITHOUT MG -> Doesn't consider the Microgrid option and works with only 1 type of cable. However, it considers reliability.
2. MILP2 -> Consider
3. MILP3
4. MILP4
5. MILP5
'''
from __future__ import division
from pyomo.opt import SolverFactory
from pyomo.core import AbstractModel
from pyomo.dataportal.DataPortal import DataPortal
from pyomo.environ import *
import pandas as pd
from datetime import datetime
import os
def MILP_without_MG(gisele_folder,case_study,n_clusters,coe,voltage,resistance,reactance,Pmax,line_cost):
############ Create abstract model ###########
model = AbstractModel()
data = DataPortal()
MILP_input_folder = gisele_folder + '/Case studies/' + case_study + '/Intermediate/Optimization/MILP_input'
MILP_output_folder = gisele_folder + '/Case studies/' + case_study + '/Intermediate/Optimization/MILP_output'
os.chdir(MILP_input_folder)
# Define some basic parameter for the per unit conversion and voltage limitation
Abase = 1
Vmin = 0.9
####################Define sets#####################
model.N = Set()
data.load(filename='nodes.csv', set=model.N) # first row is not read
model.N_clusters = Set()
data.load(filename='nodes_clusters.csv', set=model.N_clusters)
model.N_PS = Set()
data.load(filename='nodes_PS.csv', set=model.N_PS)
# Node corresponding to primary substation
# Allowed connections
model.links = Set(dimen=2) # in the csv the values must be delimited by commas
data.load(filename='links_all.csv', set=model.links)
model.links_clusters = Set(dimen=2)
data.load(filename='links_clusters.csv', set=model.links_clusters)
model.links_decision = Set(dimen=2)
data.load(filename='links_decision.csv', set=model.links_decision)
# Nodes are divided into two sets, as suggested in https://pyomo.readthedocs.io/en/stable/pyomo_modeling_components/Sets.html:
# NodesOut[nodes] gives for each node all nodes that are connected to it via outgoing links
# NodesIn[nodes] gives for each node all nodes that are connected to it via ingoing links
def NodesOut_init(model, node):
retval = []
for (i, j) in model.links:
if i == node:
retval.append(j)
return retval
model.NodesOut = Set(model.N, initialize=NodesOut_init)
def NodesIn_init(model, node):
retval = []
for (i, j) in model.links:
if j == node:
retval.append(i)
return retval
model.NodesIn = Set(model.N, initialize=NodesIn_init)
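    # Illustrative example (hypothetical node ids): with links {(1, 2), (1, 3),
    # (2, 3)}, NodesOut[1] = [2, 3] and NodesIn[3] = [1, 2]; these are the sets
    # the power-balance and voltage constraints below iterate over.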
#####################Define parameters#####################
# Electric power in the nodes (injected (-) or absorbed (+))
model.Psub = Param(model.N_clusters)
data.load(filename='power_nodes.csv', param=model.Psub)
model.ps_cost = Param(model.N_PS)
data.load(filename='PS_costs.csv', param=model.ps_cost)
model.PSmax = Param(model.N_PS)
data.load(filename='PS_power_max.csv', param=model.PSmax)
model.PS_voltage = Param(model.N_PS)
data.load(filename='PS_voltage.csv', param=model.PS_voltage)
model.PS_distance = Param(model.N_PS)
data.load(filename='PS_distance.csv', param=model.PS_distance)
# Connection distance of all the edges
model.dist = Param(model.links)
data.load(filename='distances.csv', param=model.dist)
model.weights = Param(model.links_decision)
data.load(filename='weights_decision_lines.csv', param=model.weights)
# Electrical parameters of all the cables
model.V_ref = Param(initialize=voltage)
model.A_ref = Param(initialize=Abase)
model.E_min = Param(initialize=Vmin)
model.R_ref = Param(initialize=resistance)
model.X_ref = Param(initialize=reactance)
model.P_max = Param(initialize=Pmax)
model.cf = Param(initialize=line_cost)
model.Z = Param(initialize=model.R_ref + model.X_ref * 0.5)
model.Z_ref = Param(initialize=model.V_ref ** 2 / Abase)
model.n_clusters = Param(initialize=n_clusters)
model.coe = Param(initialize=coe)
#####################Define variables#####################
# binary variable x[i,j]: 1 if the connection i,j is present, 0 otherwise
model.x = Var(model.links_decision, within=Binary)
# power[i,j] is the power flow of connection i-j
model.P = Var(model.links)
# positive variables E(i) is p.u. voltage at each node
model.E = Var(model.N, within=NonNegativeReals)
# binary variable k[i]: 1 if node i is a primary substation, 0 otherwise
model.k = Var(model.N_PS, within=Binary)
# Power output of Primary substation
model.PPS = Var(model.N_PS, within=NonNegativeReals)
model.positive_p = Var(model.links_clusters, within=Binary)
model.Distance = Var(model.N, within=Reals)
model.cable_type = Var(model.links)
#####################Define constraints###############################
# Radiality constraint
def Radiality_rule(model):
return summation(model.x) == model.n_clusters
model.Radiality = Constraint(rule=Radiality_rule)
# Power flow constraints
def Power_flow_conservation_rule(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == model.Psub[node]
model.Power_flow_conservation = Constraint(model.N_clusters, rule=Power_flow_conservation_rule)
def Power_flow_conservation_rule3(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == - model.PPS[node]
model.Power_flow_conservation3 = Constraint(model.N_PS, rule=Power_flow_conservation_rule3)
def Power_upper_decision(model, i, j):
return model.P[i, j] <= model.P_max * model.x[i, j]
model.Power_upper_decision = Constraint(model.links_decision, rule=Power_upper_decision)
def Power_lower_decision(model, i, j):
return model.P[i, j] >= 0 # -model.P_max*model.x[i,j]
model.Power_lower_decision = Constraint(model.links_decision, rule=Power_lower_decision)
def Power_upper_clusters(model, i, j):
return model.P[i, j] <= model.P_max * model.positive_p[i, j]
model.Power_upper_clusters = Constraint(model.links_clusters, rule=Power_upper_clusters)
def Power_lower_clusters(model, i, j):
return model.P[i, j] >= 0 # -model.P_max*model.x[i,j]
model.Power_lower_clusters = Constraint(model.links_clusters, rule=Power_lower_clusters)
# Voltage constraints
def Voltage_balance_rule(model, i, j):
return (model.E[i] - model.E[j]) + model.x[i, j] - 1 <= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule = Constraint(model.links_decision, rule=Voltage_balance_rule)
def Voltage_balance_rule2(model, i, j):
return (model.E[i] - model.E[j]) - model.x[i, j] + 1 >= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule2 = Constraint(model.links_decision, rule=Voltage_balance_rule2)
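    # The "+/- (x[i, j] - 1)" terms act as a big-M style switch: when the line
    # is not built (x = 0, hence P = 0) the two inequalities above are relaxed
    # by +/-1 p.u. and become non-binding, while for x = 1 they jointly enforce
    # the voltage drop E[i] - E[j] = dist[i, j] / 1000 * P[i, j] * Z / Z_ref.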
def Voltage_balance_rule3(model, i, j):
return (model.E[i] - model.E[j]) + model.positive_p[i, j] - 1 <= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule3 = Constraint(model.links_clusters, rule=Voltage_balance_rule3)
def Voltage_balance_rule4(model, i, j):
return (model.E[i] - model.E[j]) - model.positive_p[i, j] + 1 >= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule4 = Constraint(model.links_clusters, rule=Voltage_balance_rule4)
def Voltage_limit(model, i):
return model.E[i] >= model.k[i] * (model.PS_voltage[i] - model.E_min) + model.E_min
model.Voltage_limit = Constraint(model.N_PS, rule=Voltage_limit)
def Voltage_PS2(model, i):
return model.E[i] <= model.PS_voltage[i]
model.Voltage_PS2 = Constraint(model.N_PS, rule=Voltage_PS2)
def Voltage_limit_clusters2(model, i):
return model.E[i] >= model.E_min
model.Voltage_limit_clusters2 = Constraint(model.N_clusters, rule=Voltage_limit_clusters2)
def PS_power_rule_upper(model, i):
return model.PPS[i] <= model.PSmax[i] * model.k[i]
model.PS_power_upper = Constraint(model.N_PS, rule=PS_power_rule_upper)
def distance_from_PS(model, i):
return model.Distance[i] <= -model.PS_distance[i]
model.distance_from_PS = Constraint(model.N_PS, rule=distance_from_PS)
def distance_from_PS2(model, i):
return model.Distance[i] >= (model.k[i] - 1) * 200 - model.PS_distance[i] * model.k[i]
model.distance_from_PS2 = Constraint(model.N_PS, rule=distance_from_PS2)
def distance_balance_decision(model, i, j):
return model.Distance[i] - model.Distance[j] + 1000 * (model.x[i, j] - 1) <= model.dist[i, j] / 1000
model.distance_balance_decision = Constraint(model.links_decision, rule=distance_balance_decision)
def distance_balance_decision2(model, i, j):
return (model.Distance[i] - model.Distance[j]) - 1000 * (model.x[i, j] - 1) >= model.dist[i, j] / 1000
model.distance_balance_decision2 = Constraint(model.links_decision, rule=distance_balance_decision2)
def distance_balance_clusters(model, i, j):
return model.Distance[i] - model.Distance[j] + 1000 * (model.positive_p[i, j] - 1) <= model.dist[i, j] / 1000
model.distance_balance_clusters = Constraint(model.links_clusters, rule=distance_balance_clusters)
def distance_balance_clusters2(model, i, j):
return (model.Distance[i] - model.Distance[j]) - 1000 * (model.positive_p[i, j] - 1) >= model.dist[i, j] / 1000
model.distance_balance_clusters2 = Constraint(model.links_clusters, rule=distance_balance_clusters2)
def Balance_rule(model):
return (sum(model.PPS[i] for i in model.N_PS) - sum(model.Psub[i] for i in model.N_clusters)) == 0
model.Balance = Constraint(rule=Balance_rule)
def anti_paralel(model, i, j):
return model.x[i, j] + model.x[j, i] <= 1
model.anti_paralel = Constraint(model.links_decision, rule=anti_paralel)
def anti_paralel_clusters(model, i, j):
return model.positive_p[i, j] + model.positive_p[j, i] == 1
model.anti_paralel_clusters = Constraint(model.links_clusters, rule=anti_paralel_clusters)
    ####################Define objective function##########################
reliability_index = 1000
def ObjectiveFunction(model):
return summation(model.weights, model.x) * model.cf / 1000 + summation(model.ps_cost, model.k)
# return summation(model.weights, model.x) * model.cf / 1000 + summation(model.ps_cost,model.k) - sum(model.Psub[i]*model.Distance[i] for i in model.N_clusters)*reliability_index
model.Obj = Objective(rule=ObjectiveFunction, sense=minimize)
#############Solve model##################
instance = model.create_instance(data)
print('Instance is constructed:', instance.is_constructed())
# opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\Gisele_MILP\cbc')
opt = SolverFactory('gurobi')
opt.options['TimeLimit'] = 300
# opt.options['numericfocus']=0
# opt.options['mipgap'] = 0.0002
# opt.options['presolve']=2
# opt.options['mipfocus']=2
# opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\New folder\cbc')
print('Starting optimization process')
time_i = datetime.now()
opt.solve(instance, tee=True, symbolic_solver_labels=True)
time_f = datetime.now()
print('Time required for optimization is', time_f - time_i)
links = instance.x
power = instance.P
subs = instance.k
voltage = instance.E
PS = instance.PPS
DISTANCE = instance.Distance
links_clusters = instance.links_clusters
# voltage_drop=instance.z
connections_output = pd.DataFrame(columns=[['id1', 'id2', 'power']])
PrSubstation = pd.DataFrame(columns=[['index', 'power']])
all_lines = pd.DataFrame(columns=[['id1', 'id2', 'power']])
Voltages = pd.DataFrame(columns=[['index', 'voltage [p.u]']])
Links_Clusters = pd.DataFrame(columns=[['id1', 'id2', 'power']])
distance = pd.DataFrame(columns=[['index', 'length[km]']])
k = 0
for index in links:
if int(round(value(links[index]))) == 1:
connections_output.loc[k, 'id1'] = index[0]
connections_output.loc[k, 'id2'] = index[1]
connections_output.loc[k, 'power'] = value(power[index])
k = k + 1
k = 0
for index in subs:
if int(round(value(subs[index]))) == 1:
PrSubstation.loc[k, 'index'] = index
PrSubstation.loc[k, 'power'] = value(PS[index])
            print(value(PS[index]))
k = k + 1
k = 0
for v in voltage:
Voltages.loc[k, 'index'] = v
Voltages.loc[k, 'voltage [p.u]'] = value(voltage[v])
k = k + 1
k = 0
for index in power:
all_lines.loc[k, 'id1'] = index[0]
all_lines.loc[k, 'id2'] = index[1]
all_lines.loc[k, 'power'] = value(power[index])
k = k + 1
k = 0
for index in links_clusters:
Links_Clusters.loc[k, 'id1'] = index[0]
Links_Clusters.loc[k, 'id2'] = index[1]
Links_Clusters.loc[k, 'power'] = value(power[index])
k = k + 1
k = 0
for dist in DISTANCE:
distance.loc[k, 'index'] = dist
distance.loc[k, 'length[m]'] = value(DISTANCE[dist])
k = k + 1
Links_Clusters.to_csv(MILP_output_folder + '/links_clusters.csv', index=False)
connections_output.to_csv(MILP_output_folder + '/connections_output.csv', index=False)
PrSubstation.to_csv(MILP_output_folder + '/PrimarySubstations.csv', index=False)
Voltages.to_csv(MILP_output_folder + '/Voltages.csv', index=False)
all_lines.to_csv(MILP_output_folder + '/all_lines.csv', index=False)
distance.to_csv(MILP_output_folder + '/Distances.csv', index=False)
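# Illustrative call (hypothetical paths and electrical data; requires the MILP
# input CSVs prepared beforehand and a Gurobi installation):
#
#   MILP_without_MG(gisele_folder='/data/gisele', case_study='test_case',
#                   n_clusters=10, coe=60, voltage=13.8, resistance=0.42,
#                   reactance=0.39, Pmax=1.5, line_cost=10000)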
def MILP_MG_reliability(gisele_folder,case_study,n_clusters,coe,voltage,resistance,reactance,Pmax,line_cost):
model = AbstractModel()
data = DataPortal()
MILP_input_folder = gisele_folder + '/Case studies/' + case_study + '/Intermediate/Optimization/MILP_input'
MILP_output_folder = gisele_folder + '/Case studies/' + case_study + '/Intermediate/Optimization/MILP_output'
os.chdir(MILP_input_folder)
# Define some basic parameter for the per unit conversion and voltage limitation
Abase = 1
Vmin = 0.9
# ####################Define sets#####################
# Name of all the nodes (primary and secondary substations)
model.N = Set()
data.load(filename='nodes.csv', set=model.N) # first row is not read
model.N_clusters = Set()
data.load(filename='nodes_clusters.csv', set=model.N_clusters)
model.N_MG = Set()
data.load(filename='microgrids_nodes.csv', set=model.N_MG)
model.N_PS = Set()
data.load(filename='nodes_PS.csv', set=model.N_PS)
# Node corresponding to primary substation
# Allowed connections
model.links = Set(dimen=2) # in the csv the values must be delimited by commas
data.load(filename='links_all.csv', set=model.links)
model.links_clusters = Set(dimen=2)
data.load(filename='links_clusters.csv', set=model.links_clusters)
model.links_decision = Set(dimen=2)
data.load(filename='links_decision.csv', set=model.links_decision)
# Nodes are divided into two sets, as suggested in https://pyomo.readthedocs.io/en/stable/pyomo_modeling_components/Sets.html:
# NodesOut[nodes] gives for each node all nodes that are connected to it via outgoing links
# NodesIn[nodes] gives for each node all nodes that are connected to it via ingoing links
def NodesOut_init(model, node):
retval = []
for (i, j) in model.links:
if i == node:
retval.append(j)
return retval
model.NodesOut = Set(model.N, initialize=NodesOut_init)
def NodesIn_init(model, node):
retval = []
for (i, j) in model.links:
if j == node:
retval.append(i)
return retval
model.NodesIn = Set(model.N, initialize=NodesIn_init)
#####################Define parameters#####################
# Electric power in the nodes (injected (-) or absorbed (+))
model.Psub = Param(model.N_clusters)
data.load(filename='power_nodes.csv', param=model.Psub)
# model.PS=Param(model.N)
# data.load(filename='PS.csv',param=model.PS)
model.microgrid_power = Param(model.N_MG)
data.load(filename='microgrids_powers.csv', param=model.microgrid_power)
model.energy = Param(model.N_MG)
data.load(filename='energy.csv', param=model.energy)
model.mg_cost = Param(model.N_MG)
data.load(filename='microgrids_costs.csv', param=model.mg_cost)
model.ps_cost = Param(model.N_PS)
data.load(filename='PS_costs.csv', param=model.ps_cost)
model.PSmax = Param(model.N_PS)
data.load(filename='PS_power_max.csv', param=model.PSmax)
# Power of the primary substation as sum of all the other powers
# def PPS_init(model):
# return sum(model.Psub[i] for i in model.N)
# model.PPS=Param(model.PS,initialize=PPS_init)
# Connection distance of all the edges
model.dist = Param(model.links)
data.load(filename='distances.csv', param=model.dist)
model.weights = Param(model.links_decision)
data.load(filename='weights_decision_lines.csv', param=model.weights)
# Electrical parameters of all the cables
model.V_ref = Param()
model.A_ref = Param()
model.R_ref = Param()
model.X_ref = Param()
model.P_max = Param()
model.cf = Param()
model.cPS = Param()
model.E_min = Param()
model.PPS_max = Param()
model.PPS_min = Param()
model.Z = Param()
model.Z_ref = Param()
model.n_clusters = Param()
model.coe = Param()
data.load(filename='data2.dat')
#####################Define variables#####################
# binary variable x[i,j]: 1 if the connection i,j is present, 0 otherwise
model.x = Var(model.links_decision, within=Binary)
# power[i,j] is the power flow of connection i-j
model.P = Var(model.links)
# positive variables E(i) is p.u. voltage at each node
model.E = Var(model.N, within=NonNegativeReals)
# microgrid
model.z = Var(model.N_MG, within=Binary)
# binary variable k[i]: 1 if node i is a primary substation, 0 otherwise
model.k = Var(model.N_PS, within=Binary)
# Power output of Primary substation
model.PPS = Var(model.N_PS, within=NonNegativeReals)
model.MG_output = Var(model.N_MG)
model.positive_p = Var(model.links_clusters, within=Binary)
model.Distance = Var(model.N, within=Reals)
#####################Define constraints###############################
# def Make_problem_easy(model,i,j):
# return model.x[i,j]+model.weights[i,j]>=1
# model.easy = Constraint(model.links, rule=Make_problem_easy)
def Radiality_rule(model):
# return summation(model.x)==len(model.N)-summation(model.k)
return summation(model.x) == model.n_clusters
model.Radiality = Constraint(rule=Radiality_rule)
def Radiality_rule(model):
return summation(model.k) + summation(model.z) <= model.n_clusters
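    # NOTE: this second Radiality_rule shadows the first but is never attached
    # to a Constraint, so it has no effect on the model.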
def Power_flow_conservation_rule(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == model.Psub[node]
model.Power_flow_conservation = Constraint(model.N_clusters, rule=Power_flow_conservation_rule)
def Power_flow_conservation_rule2(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == - model.MG_output[node]
model.Power_flow_conservation2 = Constraint(model.N_MG, rule=Power_flow_conservation_rule2)
def Power_flow_conservation_rule3(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == - model.PPS[node]
model.Power_flow_conservation3 = Constraint(model.N_PS, rule=Power_flow_conservation_rule3)
def Power_upper_decision(model, i, j):
return model.P[i, j] <= model.P_max * model.x[i, j]
model.Power_upper_decision = Constraint(model.links_decision, rule=Power_upper_decision)
def Power_lower_decision(model, i, j):
return model.P[i, j] >= 0 # -model.P_max*model.x[i,j]
model.Power_lower_decision = Constraint(model.links_decision, rule=Power_lower_decision)
def Power_upper_clusters(model, i, j):
return model.P[i, j] <= model.P_max * model.positive_p[i, j]
model.Power_upper_clusters = Constraint(model.links_clusters, rule=Power_upper_clusters)
def Power_lower_clusters(model, i, j):
return model.P[i, j] >= 0 # -model.P_max*model.x[i,j]
model.Power_lower_clusters = Constraint(model.links_clusters, rule=Power_lower_clusters)
# Voltage constraints
def Voltage_balance_rule(model, i, j):
return (model.E[i] - model.E[j]) + model.x[i, j] - 1 <= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule = Constraint(model.links_decision, rule=Voltage_balance_rule)
def Voltage_balance_rule2(model, i, j):
return (model.E[i] - model.E[j]) - model.x[i, j] + 1 >= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule2 = Constraint(model.links_decision, rule=Voltage_balance_rule2)
def Voltage_balance_rule3(model, i, j):
return (model.E[i] - model.E[j]) + model.positive_p[i, j] - 1 <= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule3 = Constraint(model.links_clusters, rule=Voltage_balance_rule3)
def Voltage_balance_rule4(model, i, j):
return (model.E[i] - model.E[j]) - model.positive_p[i, j] + 1 >= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule4 = Constraint(model.links_clusters, rule=Voltage_balance_rule4)
def Voltage_limit(model, i):
return model.E[i] >= model.k[i] * (1 - model.E_min) + model.E_min
model.Voltage_limit = Constraint(model.N_PS, rule=Voltage_limit)
def Voltage_PS2(model, i):
return model.E[i] <= 1
model.Voltage_PS2 = Constraint(model.N_PS, rule=Voltage_PS2)
def Voltage_limit_MG(model, i):
return model.E[i] <= 1
model.Voltage_limit_MG = Constraint(model.N_MG, rule=Voltage_limit_MG)
def Voltage_limit_MG2(model, i):
return model.E[i] >= model.z[i] * (1 - model.E_min) + model.E_min
model.Voltage_limit_MG2 = Constraint(model.N_MG, rule=Voltage_limit_MG2)
def Voltage_limit_clusters2(model, i):
return model.E[i] >= model.E_min
model.Voltage_limit_clusters2 = Constraint(model.N_clusters, rule=Voltage_limit_clusters2)
def PS_power_rule_upper(model, i):
return model.PPS[i] <= model.PSmax[i] * model.k[i]
model.PS_power_upper = Constraint(model.N_PS, rule=PS_power_rule_upper)
def distance_from_PS(model, i):
return model.Distance[i] <= 0
model.distance_from_PS = Constraint(model.N_PS, rule=distance_from_PS)
def distance_from_PS2(model, i):
return model.Distance[i] >= (model.k[i] - 1) * 100
model.distance_from_PS2 = Constraint(model.N_PS, rule=distance_from_PS2)
def distance_from_MG(model, i):
return model.Distance[i] <= 0
model.distance_from_MG = Constraint(model.N_MG, rule=distance_from_MG)
def distance_from_MG2(model, i):
return model.Distance[i] >= (model.z[i] - 1) * 100 # length must be <100km in this case
model.distance_from_MG2 = Constraint(model.N_MG, rule=distance_from_MG2)
def distance_balance_decision(model, i, j):
return model.Distance[i] - model.Distance[j] + 1000 * (model.x[i, j] - 1) <= model.dist[i, j] / 1000
model.distance_balance_decision = Constraint(model.links_decision, rule=distance_balance_decision)
def distance_balance_decision2(model, i, j):
return (model.Distance[i] - model.Distance[j]) - 1000 * (model.x[i, j] - 1) >= model.dist[i, j] / 1000
model.distance_balance_decision2 = Constraint(model.links_decision, rule=distance_balance_decision2)
def distance_balance_clusters(model, i, j):
return model.Distance[i] - model.Distance[j] + 1000 * (model.positive_p[i, j] - 1) <= model.dist[i, j] / 1000
model.distance_balance_clusters = Constraint(model.links_clusters, rule=distance_balance_clusters)
def distance_balance_clusters2(model, i, j):
return (model.Distance[i] - model.Distance[j]) - 1000 * (model.positive_p[i, j] - 1) >= model.dist[i, j] / 1000
model.distance_balance_clusters2 = Constraint(model.links_clusters, rule=distance_balance_clusters2)
def Balance_rule(model):
return (sum(model.PPS[i] for i in model.N_PS) + sum(model.MG_output[i] for i in model.N_MG) - sum(
model.Psub[i] for i in model.N_clusters)) == 0
model.Balance = Constraint(rule=Balance_rule)
def MG_power_limit(model, i):
return model.MG_output[i] == model.z[i] * model.microgrid_power[i]
model.MG_power_limit = Constraint(model.N_MG, rule=MG_power_limit)
def anti_paralel(model, i, j):
return model.x[i, j] + model.x[j, i] <= 1
model.anti_paralel = Constraint(model.links_decision, rule=anti_paralel)
def anti_paralel_clusters(model, i, j):
return model.positive_p[i, j] + model.positive_p[j, i] == 1
model.anti_paralel_clusters = Constraint(model.links_clusters, rule=anti_paralel_clusters)
####################Define objective function##########################
def ObjectiveFunction(model):
# return summation(model.weights, model.x) * model.cf / 1000
return summation(model.weights, model.x) * model.cf / 1000 + summation(model.mg_cost, model.z) * 1000 + sum(
model.energy[i] * (1 - model.z[i]) for i in model.N_MG) * model.coe
# + summation(model.ps_cost,model.k)
# +sum((model.P[i]/model.A_ref)**2*0.5*1.25*model.R_ref/model.Z_ref*model.dist[i]/1000*24*365*20 for i in model.links)
# return summation(model.dist,model.x)*model.cf/1000 + summation(model.k) *model.cPS
model.Obj = Objective(rule=ObjectiveFunction, sense=minimize)
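    # The objective trades off three terms: MV line investment (weighted line
    # length times cf), the net present cost of the microgrids that are built
    # (mg_cost * z), and the cost of the energy bought from the grid for the
    # clusters that are connected instead (energy * (1 - z) * coe).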
#############Solve model##################
instance = model.create_instance(data)
print('Instance is constructed:', instance.is_constructed())
# opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\Gisele_MILP\cbc')
opt = SolverFactory('gurobi')
# opt.options['numericfocus']=0
# opt.options['mipgap'] = 0.0002
# opt.options['presolve']=2
# opt.options['mipfocus']=2
# opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\New folder\cbc')
print('Starting optimization process')
time_i = datetime.now()
opt.solve(instance, tee=True, symbolic_solver_labels=True)
time_f = datetime.now()
print('Time required for optimization is', time_f - time_i)
links = instance.x
power = instance.P
subs = instance.k
voltage = instance.E
PS = instance.PPS
mg_output = instance.MG_output
microGrid = instance.z
DISTANCE = instance.Distance
links_clusters = instance.links_clusters
# voltage_drop=instance.z
connections_output = pd.DataFrame(columns=[['id1', 'id2', 'power']])
PrSubstation = pd.DataFrame(columns=[['index', 'power']])
all_lines = pd.DataFrame(columns=[['id1', 'id2', 'power']])
Voltages = pd.DataFrame(columns=[['index', 'voltage [p.u]']])
Microgrid = pd.DataFrame(columns=[['index', 'microgrid', 'power']])
distance = pd.DataFrame(columns=[['index', 'length[km]']])
Links_Clusters = pd.DataFrame(columns=[['id1', 'id2', 'power']])
k = 0
for index in links:
if int(round(value(links[index]))) == 1:
connections_output.loc[k, 'id1'] = index[0]
connections_output.loc[k, 'id2'] = index[1]
connections_output.loc[k, 'power'] = value(power[index])
k = k + 1
k = 0
for index in subs:
if int(round(value(subs[index]))) == 1:
PrSubstation.loc[k, 'index'] = index
PrSubstation.loc[k, 'power'] = value(PS[index])
            print(value(PS[index]))
k = k + 1
k = 0
for v in voltage:
Voltages.loc[k, 'index'] = v
Voltages.loc[k, 'voltage [p.u]'] = value(voltage[v])
k = k + 1
k = 0
for index in power:
all_lines.loc[k, 'id1'] = index[0]
all_lines.loc[k, 'id2'] = index[1]
all_lines.loc[k, 'power'] = value(power[index])
k = k + 1
k = 0
for index in mg_output:
Microgrid.loc[k, 'index'] = index
Microgrid.loc[k, 'microgrid'] = value(microGrid[index])
Microgrid.loc[k, 'power'] = value(mg_output[index])
k = k + 1
k = 0
for dist in DISTANCE:
distance.loc[k, 'index'] = dist
distance.loc[k, 'length[m]'] = value(DISTANCE[dist])
k = k + 1
k = 0
for index in links_clusters:
Links_Clusters.loc[k, 'id1'] = index[0]
Links_Clusters.loc[k, 'id2'] = index[1]
Links_Clusters.loc[k, 'power'] = value(power[index])
k = k + 1
Links_Clusters.to_csv(MILP_output_folder + '/links_clusters.csv', index=False)
connections_output.to_csv(MILP_output_folder + '/connections_output.csv', index=False)
PrSubstation.to_csv(MILP_output_folder + '/PrimarySubstations.csv', index=False)
Voltages.to_csv(MILP_output_folder + '/Voltages.csv', index=False)
all_lines.to_csv(MILP_output_folder + '/all_lines.csv', index=False)
Microgrid.to_csv(MILP_output_folder + '/Microgrid.csv', index=False)
distance.to_csv(MILP_output_folder + '/Distances.csv', index=False)
def MILP_multiobjective(p_max_lines, coe, nation_emis, nation_rel, line_rel,input_michele):
# useful parameters from michele
proj_lifetime = input_michele['num_years']
nren = 3
# Initialize model
model = AbstractModel()
data = DataPortal()
# Define sets
model.of = Set(initialize=['cost', 'emis', 'rel']) # Set of objective functions
model.N = Set() # Set of all nodes, clusters and substations
data.load(filename=r'Output/LCOE/set.csv', set=model.N)
model.clusters = Set() # Set of clusters
data.load(filename='Output/LCOE/clusters.csv', set=model.clusters)
model.renfr = RangeSet(0, nren - 1,1) # set of microgrids with different ren fractions
model.mg = Set(dimen=2, within=model.clusters * model.renfr)
model.substations = Set() # Set of substations
data.load(filename='Output/LCOE/subs.csv', set=model.substations)
model.links = Set(dimen=2,within=model.N * model.N) # in the csv the values must be delimited by commas
data.load(filename='Output/LCOE/possible_links_complete.csv',set=model.links)
# Nodes are divided into two sets, as suggested in https://pyomo.readthedocs.io/en/stable/pyomo_modeling_components/Sets.html:
# NodesOut[nodes] gives for each node all nodes that are connected to it via outgoing links
# NodesIn[nodes] gives for each node all nodes that are connected to it via ingoing links
def NodesOut_init(model, node):
retval = []
for (i, j) in model.links:
if i == node:
retval.append(j)
return retval
model.NodesOut = Set(model.N, initialize=NodesOut_init)
def NodesIn_init(model, node):
retval = []
for (i, j) in model.links:
if j == node:
retval.append(i)
return retval
model.NodesIn = Set(model.N, initialize=NodesIn_init)
def NodesOutSub_init(model, node):
retval = []
for (i, j) in model.links:
if i == node:
retval.append(j)
return retval
model.NodesOutSub = Set(model.substations, initialize=NodesOutSub_init)
def NodesInSub_init(model, node):
retval = []
for (i, j) in model.links:
if j == node:
retval.append(i)
return retval
model.NodesInSub = Set(model.substations, initialize=NodesInSub_init)
#####################Define parameters#####################
# Direction of optimization for each objective function: -1 to minimize, +1 to maximize
model.dir = Param(model.of)
# Weight of objective functions in multi-objective optimization, range 0-1, sum over model.of has to be 1
model.weight = Param(model.of)
data.load(filename='Output/LCOE/data_MO.dat')
# Parameters identifying the range of variation of each objective function (needed for normalization)
model.min_obj = Param(model.of, initialize=0, mutable=True)
model.max_obj = Param(model.of, initialize=1, mutable=True)
# Electric power in the nodes (injected (-) or absorbed (+))
model.p_clusters = Param(model.clusters)
data.load(filename='Output/LCOE/c_power.csv', param=model.p_clusters)
# Maximum power supplied by substations
model.p_max_substations = Param(model.substations)
data.load(filename='Output/LCOE/sub_power.csv',
param=model.p_max_substations)
# Total net present cost of microgrid to supply each cluster
model.c_microgrids = Param(model.mg)
# data.load(filename='Output/LCOE/c_npc.csv', param=model.c_microgrids)
# Total net present cost of substations
model.c_substations = Param(model.substations)
data.load(filename='Output/LCOE/sub_npc.csv',
param=model.c_substations)
# Connection cost of the possible links
model.c_links = Param(model.links)
data.load(filename='Output/LCOE/cost_links_complete.csv',
param=model.c_links)
# Energy consumed by each cluster in microgrid lifetime
model.energy = Param(model.mg)
# data.load(filename='Output/LCOE/energy.csv', param=model.energy)
# CO2 emission produced by each cluster in microgrid lifetime
model.emission = Param(model.mg)
# data.load(filename='Output/LCOE/emissions.csv', param=model.emission)
# CO2 emission related to construction of power infrastructure
model.em_links = Param(model.links)
data.load(filename='Output/LCOE/em_links.csv', param=model.em_links)
# lol due to microgrid components_failure
model.rel_mg = Param(model.mg)
# data.load(filename='Output/LCOE/mg_rel.csv', param=model.rel_mg)
# Connection length associated to the nodes
# todo->put the real length
model.d_nodes = Param(model.N)
data.load(filename='Output/LCOE/len_nodes.csv', param=model.d_nodes)
# Connection length of the possible links
model.d_links = Param(model.links)
data.load(filename='Output/LCOE/len_links_complete.csv',
param=model.d_links)
    # maximum power flowing on lines
# model.p_max_lines = Param() # max power flowing on MV lines
# data.load(filename='Input/data_procedure2.dat')
# M_max and M_min, values required to linearize the problem
model.M_max = Param(initialize=10000)
model.M_min = Param(initialize=-10000)
data.load(filename='Output/Microgrids/microgrids.csv',
select=(
'Cluster', 'Renewable fraction index', 'Total Cost [kEUR]',
'Energy Demand [MWh]', 'CO2 [kg]', 'Unavailability [MWh/y]'),
param=(model.c_microgrids, model.energy, model.emission,
model.rel_mg), index=model.mg)
#####################Define variables#####################
# objective function variables
model.obj = Var(model.of, within=NonNegativeReals)
# auxiliary variables for normalization step
model.aux = Var(model.of)
# normalized objective functions
model.norm_obj = Var(model.of, within=NonNegativeReals)
# binary variable x[i,j]: 1 if the connection i,j is present, 0 otherwise,initialize=x_rule
model.x = Var(model.links, within=Binary)
# binary variable y[i]: 1 if a substation is installed in node i, 0 otherwise,initialize=y_rule
model.y = Var(model.substations, within=Binary)
# binary variable z[i]: 1 if a microgrid is installed in node i, 0 otherwise,initialize=z_rule
model.z = Var(model.mg, within=Binary)
# power[i,j] is the power flow of connection i-j
model.P = Var(model.links, within=NonNegativeReals)
# power[i] is the power provided by substation i
model.p_substations = Var(model.substations, within=NonNegativeReals)
# # variables k(i,j) is the variable necessary to linearize
model.k = Var(model.links)
# distance of cluster from substation
model.dist = Var(model.N, within=NonNegativeReals)
# lol due to MV lines
model.lol_line = Var(model.clusters, within=NonNegativeReals)
#####################Define constraints###############################
def Radiality_rule(model):
return summation(model.x) == len(model.clusters) - summation(
model.z)
model.Radiality = Constraint(
rule=Radiality_rule) # all the clusters are either connected to the MV grid or powered by microgrid
def Power_flow_conservation_rule(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j]
for j in model.NodesOut[node])) == +model.p_clusters[node] * (
1 - sum(model.z[node, j] for j in model.renfr))
model.Power_flow_conservation = Constraint(model.clusters,
rule=Power_flow_conservation_rule) # when the node is powered by SHS all the power is transferred to the outgoing link
def PS_Power_flow_conservation_rule(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j]
for j in model.NodesOut[node])) == -model.p_substations[node]
model.PS_Power_flow_conservation = Constraint(model.substations,
rule=PS_Power_flow_conservation_rule) # outgoing power from PS to connected links
def Power_upper_bounds_rule(model, i, j):
return model.P[i, j] <= p_max_lines * model.x[i, j]
model.upper_Power_limits = Constraint(model.links,
rule=Power_upper_bounds_rule) # limit to power flowing on MV lines
def Power_lower_bounds_rule(model, i, j):
return model.P[i, j] >= -p_max_lines * model.x[i, j]
model.lower_Power_limits = Constraint(model.links,
rule=Power_lower_bounds_rule)
def Primary_substation_upper_bound_rule(model, i):
return model.p_substations[i] <= model.p_max_substations[i] * \
model.y[i]
model.Primary_substation_upper_bound = Constraint(model.substations,
rule=Primary_substation_upper_bound_rule) # limit to power of PS
def Number_substations_rule(model, node):
return model.y[node] <= sum(
model.x[j, node] for j in model.NodesInSub[node]) + \
sum(model.x[node, j] for j in model.NodesOutSub[node])
model.Number_substation = Constraint(model.substations,
rule=Number_substations_rule)
def Limit_mg_rule(model, i):
return sum(model.z[i, j] for j in model.renfr) <= 1
model.Limit_mg = Constraint(model.clusters, rule=Limit_mg_rule)
#### Distance constraints #####
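    # k[i,j] linearizes the product (distance increment) * x[i,j] using the big-M
    # constants M_max/M_min: when x[i,j] = 1 the rules below force
    # dist[j] = dist[i] + d_links[i,j] + d_nodes[i], and when x[i,j] = 0 they
    # collapse to k[i,j] = 0 and leave dist[i] and dist[j] unrelated.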
def distance_balance_rule(model, i, j):
return model.k[i, j] == (-model.d_links[i, j] - model.d_nodes[i]) * \
model.x[i, j]
model.distance_balance_rule = Constraint(model.links,
rule=distance_balance_rule)
def distance_linearization_rule_1(model, i, j):
return model.k[i, j] <= model.M_max * model.x[i, j]
model.distance_linearization_rule_1 = Constraint(model.links,
rule=distance_linearization_rule_1)
#
def distance_linearization_rule_2(model, i, j):
return model.k[i, j] >= model.M_min * model.x[i, j]
model.distance_linearization_rule_2 = Constraint(model.links,
rule=distance_linearization_rule_2)
#
def distance_linearization_rule_3(model, i, j):
return model.dist[i] - model.dist[j] - (
1 - model.x[i, j]) * model.M_max <= \
model.k[i, j]
model.distance_linearization_rule_3 = Constraint(model.links,
rule=distance_linearization_rule_3)
def distance_linearization_rule_4(model, i, j):
return model.dist[i] - model.dist[j] - (
1 - model.x[i, j]) * model.M_min >= \
model.k[i, j]
model.distance_linearization_rule_4 = Constraint(model.links,
rule=distance_linearization_rule_4)
def distance_linearization_rule_5(model, i, j):
return model.dist[i] - model.dist[j] + (
1 - model.x[i, j]) * model.M_max >= \
model.k[i, j]
model.distance_linearization_rule_5 = Constraint(model.links,
rule=distance_linearization_rule_5)
# if a cluster is electrified with a microgrid, its distance is 0,
    # otherwise it must be less than a max threshold
def distance_upper_bound_rule(model, i, j):
return 100 * (1 - model.z[i, j]) >= model.dist[i]
model.distance_upper_bound = Constraint(model.clusters, model.renfr,
rule=distance_upper_bound_rule)
def distance_primary_substation_rule(model, i):
return model.dist[i] == 0
model.distance_primary_substation = Constraint(model.substations,
rule=distance_primary_substation_rule)
# define loss of load dependent on distance from connection point
def lol_calculation_rule(model, i):
return model.lol_line[i] == model.dist[i] * line_rel * \
model.energy[i, 1] / 8760 / proj_lifetime
model.lol_calculation = Constraint(model.clusters,
rule=lol_calculation_rule)
####################Define objective function##########################
# total npc over microgrid lifetime
def ObjectiveFunctionCost(model):
return model.obj['cost'] == summation(model.c_microgrids, model.z) \
+ summation(model.c_substations, model.y) + summation(
model.c_links, model.x) \
+ sum(model.energy[i, 1] * (
1 - sum(model.z[i, j] for j in model.renfr)) for i in
model.clusters) * coe
model.Obj1 = Constraint(rule=ObjectiveFunctionCost)
# total direct emissions over microgrid lifetime
def ObjectiveFunctionEmis(model):
return model.obj['emis'] == summation(model.emission, model.z) + \
summation(model.em_links, model.x) + \
sum(model.energy[i, 1] * (
1 - sum(model.z[i, j] for j in model.renfr)) for
i in model.clusters) * nation_emis
model.Obj2 = Constraint(rule=ObjectiveFunctionEmis)
# minimization of total energy not supplied [MWh]
def ObjectiveFunctionRel(model):
return model.obj['rel'] == \
sum(model.rel_mg[i] * (model.z[i]) for i in model.mg) + \
summation(model.lol_line) + \
sum(model.energy[i, 1] / 8760 / proj_lifetime * (
1 - sum(model.z[i, j] for j in model.renfr)) for
i in model.clusters) * nation_rel
model.Obj3 = Constraint(rule=ObjectiveFunctionRel)
# auxiliary variable to allow the activation and deactivation of OF in the for loop
def AuxiliaryNorm(model, of):
return model.aux[of] == model.dir[of] * model.obj[of]
model.AuxNorm = Constraint(model.of, rule=AuxiliaryNorm)
# aux is null for the OF not optimized in the loop
def NullAuxiliary(model, of):
return model.aux[of] == 0
model.NullAux = Constraint(model.of, rule=NullAuxiliary)
# objective function for identification of ranges of objective functions (needed for normalization)
def ObjectiveFunctionNorm(model):
return sum(model.aux[of] for of in model.of)
model.ObjNorm = Objective(rule=ObjectiveFunctionNorm, sense=maximize)
# normalized objective functions
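    # Each objective is rescaled to [0,1] so that 1 is always the best attainable value,
    # whether the raw objective is minimized or maximized; this makes the weighted sum
    # used in MultiObjective comparable across objective functions.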
def DefineNormalizedObj(model, of):
if model.dir[of] == 1:
return model.norm_obj[of] == (
model.obj[of] - model.min_obj[of]) / (
model.max_obj[of] - model.min_obj[of])
else:
return model.norm_obj[of] == (
model.max_obj[of] - model.obj[of]) / (
model.max_obj[of] - model.min_obj[of])
model.DefNormObj = Constraint(model.of, rule=DefineNormalizedObj)
# multi-objective optimization through weighted sum approach
def MultiObjective(model):
return summation(model.weight, model.norm_obj)
model.MultiObj = Objective(rule=MultiObjective, sense=maximize)
#############Solve model##################
# opt = SolverFactory('cplex',executable=r'C:\Users\silvi\IBM\ILOG\CPLEX_Studio1210\cplex\bin\x64_win64\cplex')
# opt = SolverFactory('glpk')
opt = SolverFactory('gurobi')
opt.options['mipgap'] = 0.01
instance = model.create_instance(data)
print('Instance is constructed:', instance.is_constructed())
obj_list = list(instance.of) # list of the objective functions
print(obj_list)
num_of = len(obj_list) # number of objective functions
# payoff_table = np.empty((num_of,num_of)) # table of the ranges of variations of objective functions
payoff_table = pd.DataFrame(index=obj_list, columns=obj_list)
payoff_table.index.name = 'optimization'
# for the first step, ObjNorm is the OF to be used
instance.MultiObj.deactivate()
instance.ObjNorm.activate()
instance.DefNormObj.deactivate()
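    # In each pass of the loop below, NullAux pins aux[of] to zero for the objectives
    # that are not being optimized, while AuxNorm ties aux[of] to the active one, so the
    # single ObjNorm objective effectively optimizes one objective function at a time.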
print(
'1) Optimizing one objective function at a time to identify ranges of variations')
time_i = datetime.now()
for of in obj_list:
# for of in instance.of:
print('Optimize ' + of)
instance.NullAux.activate()
instance.NullAux[of].deactivate()
instance.AuxNorm.deactivate()
instance.AuxNorm[of].activate()
opt.solve(instance, tee=True)
payoff_of = []
for i in obj_list:
p_of = float(instance.obj.get_values()[i])
payoff_of.append(p_of)
payoff_table.loc[of, :] = payoff_of
print(payoff_table)
multi_obj = True
k = 0
print('Find ranges of variation of each objective function:')
for of in obj_list:
instance.min_obj[of] = min(payoff_table[of])
instance.max_obj[of] = max(payoff_table[of])
print('min' + str(of) + '=' + str(min(payoff_table[of])))
print('max' + str(of) + '=' + str(max(payoff_table[of])))
        # skip the multi-objective optimization if the solution is unique,
        # i.e. if none of the objective functions changes across the payoff table
if instance.min_obj[of] == instance.max_obj[of]:
k = k + 1
if k == num_of:
multi_obj = False
print('Multi-obj not needed')
# for the second step, MultiObj is the OF to be used
instance.NullAux.deactivate()
instance.AuxNorm.deactivate()
instance.ObjNorm.deactivate()
instance.MultiObj.activate()
instance.DefNormObj.activate()
if multi_obj:
print('2) Multi-objective optimization: Weighted sum approach')
opt.solve(instance, tee=True)
for of in obj_list:
print(str(of) + '=' + str(instance.obj.get_values()[of]))
time_f = datetime.now()
print('Time required for the two steps is', time_f - time_i)
###################Process results#######################
links = instance.x
power = instance.P
microgrids = instance.z
distance = instance.dist
lol_line = instance.lol_line
connections_output = pd.DataFrame(columns=[['id1', 'id2']])
microgrids_output = pd.DataFrame(
columns=['Cluster', 'Renewable fraction'])
power_output = pd.DataFrame(columns=[['id1', 'id2', 'P']])
dist_output = pd.DataFrame(columns=[['ID', 'dist', 'lol']])
k = 0
for index in links:
if int(round(value(links[index]))) == 1:
connections_output.loc[k, 'id1'] = index[0]
connections_output.loc[k, 'id2'] = index[1]
k = k + 1
k = 0
for index in microgrids:
if int(round(value(microgrids[index]))) == 1:
microgrids_output.loc[k, 'Cluster'] = index[0]
microgrids_output.loc[k, 'Renewable fraction'] = index[1]
k = k + 1
k = 0
for index in power:
if value(power[index]) != 0:
power_output.loc[k, 'id1'] = index[0]
power_output.loc[k, 'id2'] = index[1]
power_output.loc[k, 'P'] = value(power[index])
k = k + 1
k = 0
for index in distance:
if value(distance[index]) != 0:
dist_output.loc[k, 'ID'] = index
dist_output.loc[k, 'dist'] = value(distance[index])
dist_output.loc[k, 'lol'] = value(lol_line[index])
k = k + 1
connections_output.to_csv('Output/LCOE/MV_connections_output.csv',
index=False)
microgrids_output.to_csv('Output/LCOE/MV_SHS_output.csv', index=False)
power_output.to_csv('Output/LCOE/MV_power_output.csv', index=False)
dist_output.to_csv('Output/LCOE/MV_dist_output.csv', index=False)
return microgrids_output, connections_output
def MILP_MG_noRel(gisele_folder,case_study,n_clusters,coe,voltage,resistance,reactance,Pmax,line_cost):
model = AbstractModel()
data = DataPortal()
MILP_input_folder = gisele_folder + '/Case studies/' + case_study + '/Intermediate/Optimization/MILP_input'
MILP_output_folder = gisele_folder + '/Case studies/' + case_study + '/Intermediate/Optimization/MILP_output'
os.chdir(MILP_input_folder)
# ####################Define sets#####################
# Define some basic parameter for the per unit conversion and voltage limitation
Abase = 1
Vmin = 0.9
# Name of all the nodes (primary and secondary substations)
model.N = Set()
data.load(filename='nodes.csv', set=model.N) # first row is not read
model.N_clusters = Set()
data.load(filename='nodes_clusters.csv', set=model.N_clusters)
model.N_MG = Set()
data.load(filename='microgrids_nodes.csv', set=model.N_MG)
model.N_PS = Set()
data.load(filename='nodes_PS.csv', set=model.N_PS)
# Node corresponding to primary substation
# Allowed connections
model.links = Set(dimen=2) # in the csv the values must be delimited by commas
data.load(filename='links_all.csv', set=model.links)
model.links_clusters = Set(dimen=2)
data.load(filename='links_clusters.csv', set=model.links_clusters)
model.links_decision = Set(dimen=2)
data.load(filename='links_decision.csv', set=model.links_decision)
# Nodes are divided into two sets, as suggested in https://pyomo.readthedocs.io/en/stable/pyomo_modeling_components/Sets.html:
# NodesOut[nodes] gives for each node all nodes that are connected to it via outgoing links
# NodesIn[nodes] gives for each node all nodes that are connected to it via ingoing links
def NodesOut_init(model, node):
retval = []
for (i, j) in model.links:
if i == node:
retval.append(j)
return retval
model.NodesOut = Set(model.N, initialize=NodesOut_init)
def NodesIn_init(model, node):
retval = []
for (i, j) in model.links:
if j == node:
retval.append(i)
return retval
model.NodesIn = Set(model.N, initialize=NodesIn_init)
#####################Define parameters#####################
# Electric power in the nodes (injected (-) or absorbed (+))
model.Psub = Param(model.N_clusters)
data.load(filename='power_nodes.csv', param=model.Psub)
# model.PS=Param(model.N)
# data.load(filename='PS.csv',param=model.PS)
model.microgrid_power = Param(model.N_MG)
data.load(filename='microgrids_powers.csv', param=model.microgrid_power)
model.energy = Param(model.N_MG)
data.load(filename='energy.csv', param=model.energy)
# TODO also calculate npv
model.mg_cost = Param(model.N_MG)
data.load(filename='microgrids_costs.csv', param=model.mg_cost)
# TODO also calculate npv
model.ps_cost = Param(model.N_PS)
data.load(filename='PS_costs.csv', param=model.ps_cost)
model.PSmax = Param(model.N_PS)
data.load(filename='PS_power_max.csv', param=model.PSmax)
model.PS_voltage = Param(model.N_PS)
data.load(filename='PS_voltage.csv', param=model.PS_voltage)
# Power of the primary substation as sum of all the other powers
# def PPS_init(model):
# return sum(model.Psub[i] for i in model.N)
# model.PPS=Param(model.PS,initialize=PPS_init)
# Connection distance of all the edges
model.dist = Param(model.links)
data.load(filename='distances.csv', param=model.dist)
#TODO use the npv cost of the lines
model.weights = Param(model.links_decision)
data.load(filename='weights_decision_lines.csv', param=model.weights)
#data.load(filename='weights_decision_lines_npv.csv', param=model.weights)
# Electrical parameters of all the cables
model.V_ref = Param(initialize=voltage)
model.A_ref = Param(initialize=Abase)
model.E_min = Param(initialize=Vmin)
model.R_ref = Param(initialize=resistance)
model.X_ref = Param(initialize=reactance)
model.P_max = Param(initialize=Pmax)
model.cf = Param(initialize=line_cost)
model.Z = Param(initialize=model.R_ref + model.X_ref * 0.5)
model.Z_ref = Param(initialize=model.V_ref ** 2 / Abase)
model.n_clusters = Param(initialize=n_clusters)
model.coe = Param(initialize=coe)
#####################Define variables#####################
# binary variable x[i,j]: 1 if the connection i,j is present, 0 otherwise
model.x = Var(model.links_decision, within=Binary)
# power[i,j] is the power flow of connection i-j
model.P = Var(model.links)
# positive variables E(i) is p.u. voltage at each node
model.E = Var(model.N, within=NonNegativeReals)
# microgrid
model.z = Var(model.N_MG, within=Binary)
# binary variable k[i]: 1 if node i is a primary substation, 0 otherwise
model.k = Var(model.N_PS, within=Binary)
# Power output of Primary substation
model.PPS = Var(model.N_PS, within=NonNegativeReals)
model.MG_output = Var(model.N_MG)
#####################Define constraints###############################
# def Make_problem_easy(model,i,j):
# return model.x[i,j]+model.weights[i,j]>=1
# model.easy = Constraint(model.links, rule=Make_problem_easy)
def Radiality_rule(model):
# return summation(model.x)==len(model.N)-summation(model.k)
return summation(model.x) == model.n_clusters
model.Radiality = Constraint(rule=Radiality_rule)
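    # Radiality: the number of new (decision) links equals the number of clusters to be
    # connected, which together with the power-balance constraints is intended to yield
    # a radial (loop-free) network.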
def Power_flow_conservation_rule(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == model.Psub[node]
model.Power_flow_conservation = Constraint(model.N_clusters, rule=Power_flow_conservation_rule)
def Power_flow_conservation_rule2(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == - model.MG_output[node]
model.Power_flow_conservation2 = Constraint(model.N_MG, rule=Power_flow_conservation_rule2)
def Power_flow_conservation_rule3(model, node):
return (sum(model.P[j, node] for j in model.NodesIn[node]) - sum(
model.P[node, j] for j in model.NodesOut[node])) == - model.PPS[node]
model.Power_flow_conservation3 = Constraint(model.N_PS, rule=Power_flow_conservation_rule3)
def Power_upper_decision(model, i, j):
return model.P[i, j] <= model.P_max * model.x[i, j]
model.Power_upper_decision = Constraint(model.links_decision, rule=Power_upper_decision)
def Power_lower_decision(model, i, j):
return model.P[i, j] >= -model.P_max * model.x[i, j]
model.Power_lower_decision = Constraint(model.links_decision, rule=Power_lower_decision)
def Power_upper_clusters(model, i, j):
return model.P[i, j] <= model.P_max
model.Power_upper_clusters = Constraint(model.links_clusters, rule=Power_upper_clusters)
def Power_lower_clusters(model, i, j):
return model.P[i, j] >= -model.P_max
model.Power_lower_clusters = Constraint(model.links_clusters, rule=Power_lower_clusters)
# Voltage constraints
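    # The first two rules enforce the per-unit voltage-drop relation
    #   E[i] - E[j] = dist[i,j]/1000 * P[i,j] * Z / Z_ref
    # only when a candidate link is built (x[i,j] = 1); the +/-1 p.u. slack acts as a
    # big-M when x[i,j] = 0. Rules 3-4 impose the same relation on links_clusters,
    # which carry no build decision.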
def Voltage_balance_rule(model, i, j):
return (model.E[i] - model.E[j]) + model.x[i, j] - 1 <= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule = Constraint(model.links_decision, rule=Voltage_balance_rule)
def Voltage_balance_rule2(model, i, j):
return (model.E[i] - model.E[j]) - model.x[i, j] + 1 >= model.dist[i, j] / 1000 * model.P[
i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule2 = Constraint(model.links_decision, rule=Voltage_balance_rule2)
def Voltage_balance_rule3(model, i, j):
return (model.E[i] - model.E[j]) <= model.dist[i, j] / 1000 * model.P[i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule3 = Constraint(model.links_clusters, rule=Voltage_balance_rule3)
def Voltage_balance_rule4(model, i, j):
return (model.E[i] - model.E[j]) >= model.dist[i, j] / 1000 * model.P[i, j] * model.Z / model.Z_ref
model.Voltage_balance_rule4 = Constraint(model.links_clusters, rule=Voltage_balance_rule4)
def Voltage_limit(model, i):
return model.E[i] >= model.k[i] * (model.PS_voltage[i] - model.E_min) + model.E_min
model.Voltage_limit = Constraint(model.N_PS, rule=Voltage_limit)
def Voltage_PS2(model, i):
return model.E[i] <= model.PS_voltage[i]
model.Voltage_PS2 = Constraint(model.N_PS, rule=Voltage_PS2)
def Voltage_limit_MG(model, i):
return model.E[i] <= 1
model.Voltage_limit_MG = Constraint(model.N_MG, rule=Voltage_limit_MG)
def Voltage_limit_MG2(model, i):
return model.E[i] >= model.z[i] * (1 - model.E_min) + model.E_min
model.Voltage_limit_MG2 = Constraint(model.N_MG, rule=Voltage_limit_MG2)
def Voltage_limit_clusters2(model, i):
return model.E[i] >= model.E_min
model.Voltage_limit_clusters2 = Constraint(model.N_clusters, rule=Voltage_limit_clusters2)
def PS_power_rule_upper(model, i):
return model.PPS[i] <= model.PSmax[i] * model.k[i]
model.PS_power_upper = Constraint(model.N_PS, rule=PS_power_rule_upper)
def Balance_rule(model):
return (sum(model.PPS[i] for i in model.N_PS) + sum(model.MG_output[i] for i in model.N_MG) - sum(
model.Psub[i] for i in model.N_clusters)) == 0
model.Balance = Constraint(rule=Balance_rule)
def MG_power_limit(model, i):
return model.MG_output[i] == model.z[i] * model.microgrid_power[i]
model.MG_power_limit = Constraint(model.N_MG, rule=MG_power_limit)
####################Define objective function##########################
print(coe)
def ObjectiveFunction(model):
# model.weights is in euro, model.coe is euro/MWh,
return summation(model.weights, model.x) + summation(model.mg_cost, model.z) * 1000 + \
sum(model.energy[i] * (1 - model.z[i]) for i in model.N_MG) * model.coe + summation(model.ps_cost,
model.k)
# +sum((model.P[i]/model.A_ref)**2*0.5*1.25*model.R_ref/model.Z_ref*model.dist[i]/1000*24*365*20 for i in model.links)
# return summation(model.dist,model.x)*model.cf/1000 + summation(model.k) *model.cPS
model.Obj = Objective(rule=ObjectiveFunction, sense=minimize)
#############Solve model##################
instance = model.create_instance(data)
print('Instance is constructed:', instance.is_constructed())
# opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\Gisele_MILP\cbc')
opt = SolverFactory('gurobi')
# opt.options['numericfocus']=0
opt.options['mipgap'] = 0.02
opt.options['presolve']=2
# opt.options['mipfocus'] = 3
print('Starting optimization process')
time_i = datetime.now()
opt.solve(instance, tee=True, symbolic_solver_labels=True)
time_f = datetime.now()
print('Time required for optimization is', time_f - time_i)
links = instance.x
power = instance.P
subs = instance.k
voltage = instance.E
PS = instance.PPS
mg_output = instance.MG_output
microGrid = instance.z
links_clusters = instance.links_clusters
# voltage_drop=instance.z
connections_output = pd.DataFrame(columns=[['id1', 'id2', 'power']])
PrSubstation = pd.DataFrame(columns=[['index', 'power']])
all_lines = pd.DataFrame(columns=[['id1', 'id2', 'power']])
Voltages = | pd.DataFrame(columns=[['index', 'voltage [p.u]']]) | pandas.DataFrame |
import numpy as np
from pandas import DataFrame, Series, DatetimeIndex, date_range
try:
from pandas.plotting import andrews_curves
except ImportError:
from pandas.tools.plotting import andrews_curves
import matplotlib
matplotlib.use('Agg')
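# 'Agg' is a non-interactive backend, so the plotting benchmarks can run headless.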
class Plotting(object):
def setup(self):
self.s = Series(np.random.randn(1000000))
self.df = | DataFrame({'col': self.s}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
from swstats import *
from scipy.stats import ttest_ind
import xlsxwriter
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.proportion import proportions_ztest
debugging = False
def pToSign(pval):
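    # Map a p-value to the conventional significance markers:
    # *** p<.001, ** p<.01, * p<.05, + p<.1.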
if pval < .001:
return "***"
elif pval < .01:
return "**"
elif pval < .05:
return "*"
elif pval < .1:
return "+"
else:
return ""
def analyzeExperiment_ContinuousVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1mean = np.mean(order_value_control_group)
arm1sd = np.std(order_value_control_group)
arm1text = "" + "{:.2f}".format(arm1mean) + " (" + "{:.2f}".format(arm1sd) + ")"
# Effect of Arm 2
arm2mean = np.mean(order_value_arm2_group)
arm2sd = np.std(order_value_arm2_group)
tscore, pval2 = ttest_ind(order_value_control_group, order_value_arm2_group)
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2mean) + " (" + "{:.2f}".format(arm2sd) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3mean = np.mean(order_value_arm3_group)
arm3sd = np.std(order_value_arm3_group)
tscore, pval3 = ttest_ind(order_value_control_group, order_value_arm3_group)
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3mean) + " (" + "{:.2f}".format(arm3sd) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4mean = np.mean(order_value_arm4_group)
arm4sd = np.std(order_value_arm4_group)
tscore, pval4 = ttest_ind(order_value_control_group, order_value_arm4_group)
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4mean) + " (" + "{:.2f}".format(arm4sd) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
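    # Holm's step-down method controls the family-wise error rate across the three
    # treatment-vs-control comparisons; the ",#" suffix below marks arms that remain
    # significant at alpha = 0.05 after the correction.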
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
tscore, pval2to4 = ttest_ind(order_value_arm2_group, order_value_arm4_group)
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4mean - arm2mean) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
tscore, pval3to4 = ttest_ind(order_value_arm3_group, order_value_arm4_group)
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4mean - arm3mean) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeExperiment_BinaryVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1Successes = sum(order_value_control_group.isin([True, 1]))
arm1Count = sum(order_value_control_group.isin([True, False, 1, 0]))
arm1PercentSuccess = arm1Successes/arm1Count
arm1text = "" + "{:.2f}".format(arm1PercentSuccess) + " (" + "{:.0f}".format(arm1Successes) + ")"
# Effect of Arm 2
arm2Successes = sum(order_value_arm2_group.isin([True, 1]))
arm2Count = sum(order_value_arm2_group.isin([True, False, 1, 0]))
arm2PercentSuccess = arm2Successes/arm2Count
zstat, pval2 = proportions_ztest(count=[arm1Successes,arm2Successes], nobs=[arm1Count,arm2Count], alternative='two-sided')
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2PercentSuccess) + " (" + "{:.0f}".format(arm2Successes) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3Successes = sum(order_value_arm3_group.isin([True, 1]))
arm3Count = sum(order_value_arm3_group.isin([True, False, 1, 0]))
arm3PercentSuccess = arm3Successes/arm3Count
zstat, pval3 = proportions_ztest(count=[arm1Successes,arm3Successes], nobs=[arm1Count,arm3Count], alternative='two-sided')
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3PercentSuccess) + " (" + "{:.0f}".format(arm3Successes) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4Successes = sum(order_value_arm4_group.isin([True, 1]))
arm4Count = sum(order_value_arm4_group.isin([True, False, 1, 0]))
arm4PercentSuccess = arm4Successes/arm4Count
zstat, pval4 = proportions_ztest(count=[arm1Successes,arm4Successes], nobs=[arm1Count,arm4Count], alternative='two-sided')
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4PercentSuccess) + " (" + "{:.0f}".format(arm4Successes) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
zstat, pval2to4 = proportions_ztest(count=[arm2Successes,arm4Successes], nobs=[arm2Count,arm4Count], alternative='two-sided')
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm2PercentSuccess) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
zstat, pval3to4 = proportions_ztest(count=[arm3Successes,arm4Successes], nobs=[arm3Count,arm4Count], alternative='two-sided')
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm3PercentSuccess) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeResults(dta, outputFileName, scoringVars, surveyVersion, primaryOnly=True):
if primaryOnly:
dta = dta[dta.IsPrimaryWave].copy()
dataDir = "C:/Dev/src/ssascams/data/"
''' Analyze the answers'''
writer = pd.ExcelWriter(dataDir + 'RESULTS_' + outputFileName + '.xlsx', engine='xlsxwriter')
# ###############
# Export summary stats
# ###############
demographicVars = ['trustScore', 'TotalIncome', 'incomeAmount', 'Race', 'race5', 'employment3', 'educYears', 'Married', 'marriedI', 'Age', 'ageYears', 'Gender', 'genderI']
allSummaryVars = ["percentCorrect", "surveyArm", "Wave", "daysFromTrainingToTest"] + scoringVars + demographicVars
summaryStats = dta[allSummaryVars].describe()
summaryStats.to_excel(writer, sheet_name="summary_FullPop", startrow=0, header=True, index=True)
grouped = dta[allSummaryVars].groupby(["surveyArm"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['VarName', 'Metric'], inplace=True)
summaryStats.to_excel(writer, sheet_name="summary_ByArm", startrow=0, header=True, index=False)
    if not primaryOnly:
grouped = dta[allSummaryVars].groupby(["surveyArm", "Wave"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['Wave','VarName', 'Metric'], inplace=True)
# grouped.describe().reset_index().pivot(index='name', values='score', columns='level_1')
summaryStats.to_excel(writer, sheet_name="summary_ByArmAndWave", startrow=0, header=True, index=False)
# summaryStats.to_csv(dataDir + "RESULTS_" + outputFileName + '.csv')
# ###############
# RQ1: What is the effect?
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numFakeLabeledReal")
row3 = analyzeExperiment_ContinuousVar(dta, "numRealLabeledFake")
row4 = analyzeExperiment_ContinuousVar(dta, "percentCorrect")
pd.DataFrame([row1, row2, row3, row4]).to_excel(writer, sheet_name="r1", startrow=1, header=True, index=True)
##############
# RQ1* Robustness check on result: is the experiment randomized correctly?
##############
# NumCorrect Regression
resultTables = ols('numCorrect ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r1_reg", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r1_reg", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ2: Communication Type
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numSMSesCorrect")
row3 = analyzeExperiment_ContinuousVar(dta, "numLettersCorrect")
pd.DataFrame([row1, row2, row3]).to_excel(writer, sheet_name="r2", startrow=1, header=True, index=True)
##############
# RQ2* Robustness check on Emails result: is the experiment randomized correctly?
##############
# NumEmailsCorrect Regression
resultTables = ols('numEmailsCorrect ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r2_reg", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r2_reg", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ3: Time Delay
# ###############
resultTables = ols('numCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r3a_CorrectWaveAndDay_Simple", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r3a_CorrectWaveAndDay_Simple", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numEmailsCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r3b_EmailWaveAndDay_Simple", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r3b_EmailWaveAndDay_Simple", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ4: Rainloop
# ###############
if surveyVersion == '6':
resultTables = ols('NumHeadersOpened ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r4_HeadersOpened", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r4_HeadersOpened",startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
########################
# R5a: What determines fraud susceptibility (whether people get tricked or not)?
# Ie, false negatives
########################
# First Try on Regression
# resultTables = ols('numFakeLabeledReal ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
# 'C(race5) + C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
# pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_numFakeLabeledReal_WRace", startrow=1, header=False, index=False)
# pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_numFakeLabeledReal_WRace", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# Remove race - many variables, small counts - likely over specifying
resultTables = ols('numFakeLabeledReal ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r5a_numFakeLabeledReal", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r5a_numFakeLabeledReal", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numLabeledReal ~ C(surveyArm) + trustScore + lIncomeAmount + C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_numLabeledReal", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_numLabeledReal", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
########################
# R5b: What determines lack of trust?
########################
# Ie, false positive
resultTables = ols('numRealLabeledFake ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r5b_numRealLabeledFake", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r5b_numRealLabeledFake", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numLabeledFake ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_numLabeledFake", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_numLabeledFake", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ6: Impostor Type
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numCorrect_SSA")
row2 = analyzeExperiment_ContinuousVar(dta, "numCorrect_Other")
row3 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect_SSA")
row4 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect_Other")
pd.DataFrame([row1, row2, row3, row4]).to_excel(writer, sheet_name="r6", startrow=1, header=True, index=True)
# ###############
# RQ7: Likelihood of being tricked
# ###############
dta['isTrickedByFraud'] = dta.numFakeLabeledReal > 0
dta['isTrickedByAnySSAEmail'] = dta.numEmailsCorrect_SSA < max(dta.numEmailsCorrect_SSA)
dta['isTrickedByAnyNonSSAEmail'] = dta.numEmailsCorrect_Other < max(dta.numEmailsCorrect_Other)
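    # isTrickedByFraud: labeled at least one fake message as real. The two email flags
    # mark respondents who scored below the best observed score for that category
    # (i.e., assuming someone labeled them all correctly, they mislabeled at least one).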
row1 = analyzeExperiment_BinaryVar(dta, "isTrickedByFraud")
row2 = analyzeExperiment_BinaryVar(dta, "isTrickedByAnySSAEmail")
row3 = analyzeExperiment_BinaryVar(dta, "isTrickedByAnyNonSSAEmail")
pd.DataFrame([row1, row2, row3]).to_excel(writer, sheet_name="r7", startrow=1, header=True, index=True)
# ###############
# RQ8: Every Email
# ###############
filter_cols = [col for col in dta.columns if col.startswith('Correct_')]
theRows = []
for filter_col in filter_cols:
arow = analyzeExperiment_BinaryVar(dta, filter_col)
theRows = theRows + [arow]
pd.DataFrame(theRows).to_excel(writer, sheet_name="r8", startrow=1, header=True, index=True)
# ##############
# Correlations
################
indepVars = ['surveyArm', 'daysFromTrainingToTest', 'Wave', 'trustScore', 'incomeAmount', 'race5', 'employment3', 'educYears', 'marriedI', 'ageYears','Gender',
'previousFraudYN', 'lose_moneyYN', 'duration_p1', 'duration_p1_Quantile', 'duration_p2', 'duration_p2_Quantile', 'Employment']
depVars = ['numCorrect', 'numFakeLabeledReal', 'numRealLabeledFake']
dta.Wave = dta.Wave.astype('float64')
# Look at Correlations among variables
allVarsToCorr = depVars + indepVars
corrMatrix = dta[allVarsToCorr].corr()
pd.DataFrame(corrMatrix).to_excel(writer, sheet_name="corrMatrix", startrow=1, header=True, index=True)
# duration_p1 is a proxy for arm, so strange results there.
# we'd need a fine-tuned var. Let's use p2 instead. Also, the Quantile shows a much stronger relationship than the raw values (likely since it is not linear in the depvars)
# Losing money and income and age show a moderate relationship
# ##############
# Scatter Plots
################
import seaborn as sns
sns.set_theme(style="ticks")
toPlot = dta[['numCorrect', 'surveyArm', 'daysFromTrainingToTest', 'Wave', 'trustScore', 'lose_moneyYN', 'duration_p2_Quantile']]
sns.pairplot(toPlot, hue="surveyArm")
# ##############
# Regressions
# ##############
# Sanity Check regression
resultTables = ols('lIncomeAmount ~ageYears + ageYearsSq + educYears + marriedI + genderI', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_Sanity", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_Sanity", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# Simple Experiment-Only test
resultTables = ols('numCorrect ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numCorrect_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numCorrect_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numEmailsCorrect ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numEmailsCorrect_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numEmailsCorrect_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# Full regression, within each specific wave, with controls
startRow = 1
for wave in dta.Wave.unique():
# worksheet = writer.book.add_worksheet("numCorrect_ByArmAndWave")
# worksheet.write(startRow, 1, 'Wave ' + str(wave))
# startRow = startRow + 2
resultTables = ols('numCorrect ~ C(surveyArm) + lIncomeAmount + ageYears + ageYearsSq + educYears + marriedI + genderI', data=dta.loc[dta.Wave==wave]).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numCorrect_ByArmInWave", startrow=startRow, header=False, index=False)
startRow = startRow + len(resultTables[0]) + 2
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numCorrect_ByArmInWave", startrow=startRow, header=False, index=False)
startRow = startRow + len(resultTables[1]) + 2
startRow = 1
for wave in dta.Wave.unique():
# worksheet = writer.book.add_worksheet("numEmailsCorrect_ByArmAndWave")
# worksheet.write(startRow, 1, 'Wave ' + str(wave))
# startRow = startRow + 2
resultTables = ols('numEmailsCorrect ~ C(surveyArm) + lIncomeAmount + ageYears + ageYearsSq + educYears + marriedI + genderI', data=dta.loc[dta.Wave == wave]).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numEmailsCorrect_ByArmInWave", startrow=startRow, header=False, index=False)
startRow = startRow + len(resultTables[0]) + 2
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numEmailsCorrect_ByArmInWave", startrow=startRow, header=False, index=False)
startRow = startRow + len(resultTables[1]) + 2
resultTables = ols('numLettersCorrect ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numLettersCorrect_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numLettersCorrect_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numSMSesCorrect ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numSMSesCorrect_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numSMSesCorrect_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
if (surveyVersion in ['5D', '5P']):
resultTables = ols('NumHeadersOpened ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="NumHeadersOpened_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="NumHeadersOpened_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('NumEmailsActedUpon ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="NumEmailsActedUpon_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="NumEmailsActedUpon_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numLabeledFake ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numLabeledFake_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numLabeledFake_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numFakeLabeledFake ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numFakeLabeledFake_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numFakeLabeledFake_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numRealLabeledReal ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numRealLabeledReal_ByArm", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numRealLabeledReal_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
## ###########
# Is there an effect of wave: Additional Exporation
##############
# NumCorrect Regression
resultTables = ols('numCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_CorrectWithWaveAndDays", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_CorrectWithWaveAndDays", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numCorrect ~ C(surveyArm)*Wave + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
| pd.DataFrame(resultTables[0]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 14:51:54 2020
A collection of cleanup functions that should just run.
Just keep them in one place and clean up the file structure a bit.
Expects that the entire pipeline up until now has been completed.
Hopefully this all works, because it will be hard to debug!!
Things might be out of order so need to check this.
@author: npittman
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import xesmf as xe
import cbsyst as cb
import os
from carbon_math import *
def find_enso_events_redundent(threshold=0.5):
'''
A function to pull ENSO data from our datasets/indexes/meiv2.csv
save events (months) stronger than threshold (0.5 by default)
'processed/indexes/el_nino_events.csv'
'processed/indexes/la_nina_events.csv'
Returns
-------
None.
'''
#enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col='Year')
enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col=0,header=None)
enso_flat=enso.stack()
enso_dates=pd.date_range('1979','2020-07-01',freq='M')- pd.offsets.MonthBegin(1) #Probably want to check this is correct if updating.
enso_timeseries=pd.DataFrame({'Date':enso_dates,'mei':enso_flat})
#Check if we are in or out of an event so far
el_event=False
la_event=False
el_startdate=''
la_startdate=''
elnino=pd.DataFrame()
lanina=pd.DataFrame()
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
if val>=threshold:
if el_event==False: #And we havent yet entered an event
el_startdate=today
el_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if el_event==True:
elnino=elnino.append({'start':el_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
el_event=False
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
if val<=-threshold:
if la_event==False: #And we havent yet entered an event
la_startdate=today
la_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if la_event==True:
lanina=lanina.append({'start':la_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
la_event=False
print(elnino)
print(lanina)
elnino.to_csv('processed/indexes/el_nino_events.csv')
lanina.to_csv('processed/indexes/la_nina_events.csv')
def find_enso_events_CP(threshold=0.5):
'''
A function to pull ENSO data from our datasets/indexes/meiv2.csv
save events (months) stronger than threshold (0.5 by default)
Modified to include CP, EP, El Nino and La Nina events and are saved to csv.
'processed/indexes/el_nino_events.csv'
'processed/indexes/la_nina_events.csv'
'processed/indexes/ep_el_nino_events.csv'
'processed/indexes/cp_el_nina_events.csv'
Returns
-------
None.
'''
#enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col='Year')
enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col=0,header=None)
enso=enso.iloc[3:] #Just so Both EMI and MEI start in 1981-01-01
enso_flat=enso.stack()
enso_dates=pd.date_range('1982','2020-07-01',freq='M')- pd.offsets.MonthBegin(1) #Probably want to check this is correct if updating.
emi=pd.read_csv('datasets/indexes/SINTEX_EMI.csv')
emi.time=emi.time.astype('datetime64[M]')
emi.index=emi.time
emi=emi.Obs
enso_timeseries=pd.DataFrame({'Date':enso_dates,'mei':enso_flat})
fp='processed/combined_dataset/month_data_exports.nc'
dat=xr.open_mfdataset(fp)
#Check if we are in or out of an event so far
el_event=False
la_event=False
ep_event=False
cp_event=False
cpc_event=False
el_startdate=''
la_startdate=''
ep_startdate=''
cp_startdate=''
cpc_startdate=''
elnino=pd.DataFrame()
lanina=pd.DataFrame()
cp=pd.DataFrame()
cpc=pd.DataFrame()
ep=pd.DataFrame()
    month_threshold=5 # Months over threshold
threshold=0.5
#All El Nino
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
if val>=threshold:
if el_event==False: #And we havent yet entered an event
el_startdate=today
el_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if el_event==True:
if ((today-el_startdate)>=np.timedelta64(month_threshold,'M')): #Make sure event is long enough
if el_startdate.to_datetime64()!=enso_timeseries.Date.iloc[i-1].to_datetime64():
elnino=elnino.append({'start':el_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
el_event=False
else: el_event=False
#La Nina
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
if val<=-threshold:
if la_event==False: #And we havent yet entered an event
la_startdate=today
la_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if la_event==True:
if ((today-la_startdate)>=np.timedelta64(month_threshold,'M')): #Make sure event is long enough
if la_startdate.to_datetime64()!=enso_timeseries.Date.iloc[i-1].to_datetime64():
lanina=lanina.append({'start':la_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
la_event=False
else: la_event=False
#CP events
for i,today in enumerate(emi.index):
#val=emi.iloc[i]
val=np.mean(emi.iloc[i:i+2])
if val>=threshold:
if cp_event==False: #And we havent yet entered an event
cp_startdate=today
cp_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if cp_event==True:
if ((today-cp_startdate)>=np.timedelta64(month_threshold,'M')): #Make sure event is long enough
if cp_startdate.to_datetime64()!=emi.index[i-1].to_datetime64():
cp=cp.append({'start':cp_startdate.to_datetime64(),
'end':emi.index[i-1],
'emi':emi.values[i-1]},ignore_index=True)
cp_event=False
else: cp_event=False
#EP El Nino
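    # An Eastern-Pacific (EP) event is flagged when the MEI is at or above the threshold
    # while the 8-month forward mean of the EMI (Modoki index) stays below it, i.e. the
    # warming is not concentrated in the central Pacific; CP events were already caught
    # by the EMI loop above.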
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
val1=np.mean(enso_timeseries.mei.iloc[i])
emi_val=emi.iloc[i]
emi_val1=np.mean(emi.iloc[i:i+8]) #Just to make sure the 2015 EP event is classified as such
print(today)
print(emi.index[i])
print(enso_timeseries.iloc[i].Date)
print()
print(emi_val,val)
print('\n')
#print()
if (val1>=threshold)&(emi_val1<threshold):#&(emi_val1<threshold):
if ep_event==False: #And we havent yet entered an event
ep_startdate=today
ep_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if ep_event==True:
if ((today-ep_startdate)>=np.timedelta64(month_threshold,'M')): #Make sure event is long enough
if ep_startdate.to_datetime64()!=enso_timeseries.Date.iloc[i-1].to_datetime64():
ep=ep.append({'start':ep_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
ep_event=False
else: ep_event=False
print(elnino)
print(lanina)
print(cp)
print(ep)
elnino.to_csv('processed/indexes/el_nino_events.csv')
lanina.to_csv('processed/indexes/la_nina_events.csv')
cp.to_csv('processed/indexes/cp_events.csv')
ep.to_csv('processed/indexes/ep_events.csv')
def combine_csvs_to_nc():
'''
Combine all our data into daily, weekly, monthly or all data files.
This should be done at the end of 7ab already so may not be essential to run this one.
Previously known as data day average.
'''
moorings=['110W','125W','140W','155W','170W','165E']
mooring_int=[110,125,140,155,170,195]
aavg_a=[]
davg_a=[]
wavg_a=[]
mavg_a=[]
for mooring in moorings:
fp='processed/combined_dataset/'+mooring+'_combined.csv'
dat=pd.read_csv(fp,index_col=False)
#print(dat)
dat['Date']=dat.Date.astype(np.datetime64)
dat.set_index(pd.DatetimeIndex(dat.Date),inplace=True)
alld = dat.to_xarray()#.drop('Unnamed: 0')
davg = dat.resample('D').mean().to_xarray()#.drop('Unnamed: 0') #Day average
wavg = dat.resample('W').mean().to_xarray()#.drop('Unnamed: 0') #Week average
mavg = dat.resample('M').mean().to_xarray()#.drop('Unnamed: 0') #Month average
aavg_a.append(alld)
davg_a.append(davg)
wavg_a.append(wavg)
mavg_a.append(mavg)
#plt.scatter(wavg.co2flux_gmyr,wavg.mod_vgbm)
#plt.scatter(wavg.co2flux_gmyr,wavg.mod_cpbm)
#plt.scatter(wavg.co2flux_gmyr,wavg.mod_cafe)
#plt.scatter(wavg.co2flux_gmyr,wavg.mod_eppley)
all_data=xr.concat(aavg_a,dim='Mooring')
daily=xr.concat(davg_a,dim='Mooring')
weekly=xr.concat(wavg_a,dim='Mooring')
monthly=xr.concat(mavg_a,dim='Mooring')
all_data.coords['Mooring']=moorings
daily.coords['Mooring']=mooring_int
weekly.coords['Mooring']=mooring_int
monthly.coords['Mooring']=mooring_int
fp='processed/combined_dataset/'
try:
os.remove(fp+'month_data.nc')
os.remove(fp+'week_data.nc')
os.remove(fp+'day_data.nc')
except:
pass
all_data.to_netcdf(fp+'all_data.nc',engine='h5netcdf')
daily.to_netcdf(fp+'day_data.nc')
weekly.to_netcdf(fp+'week_data.nc')
monthly.to_netcdf(fp+'month_data.nc')
def npp_csvs_to_nc():
'''
Cleanup function to combine the csvs for each mooring (A heap of files) into a single 4d xarray netcdf.
'''
moorings=['110W','125W','140W','155W','170W','165E']
#moorings=['155W','170W','165E']
#moorings=['155W']
flux_holder=[]
chl_holder=[]
for i, mooring_name in enumerate(moorings):
#Primary Productivity Models and Time Series
mod_vgpm=pd.read_csv('processed/npp_mooring_timeseries/vgpm_mod_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','mod_vgpm'],index_col=0).to_xarray()
mod_cbpm=pd.read_csv('processed/npp_mooring_timeseries/cbpm_mod_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','mod_cbpm'],index_col=0).to_xarray()
mod_eppley=pd.read_csv('processed/npp_mooring_timeseries/eppley_mod_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','mod_eppley'],index_col=0).to_xarray()
mod_cafe=pd.read_csv('processed/npp_mooring_timeseries/cafe_mod_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','mod_cafe'],index_col=0).to_xarray()
sw_vgpm=pd.read_csv('processed/npp_mooring_timeseries/vgbm_sw_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','sw_vgbm'],index_col=0).to_xarray()
sw_cbpm=pd.read_csv('processed/npp_mooring_timeseries/cbpm_sw_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','sw_cbpm'],index_col=0).to_xarray()
sw_eppley=pd.read_csv('processed/npp_mooring_timeseries/eppley_sw_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','sw_eppley'],index_col=0).to_xarray()
sw_cafe=pd.read_csv('processed/npp_mooring_timeseries/cafe_sw_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','sw_cafe'],index_col=0).to_xarray()
viirs_vgpm=pd.read_csv('processed/npp_mooring_timeseries/vgpm_viirs_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','viirs_vgpm'],index_col=0).to_xarray()
viirs_cbpm=pd.read_csv('processed/npp_mooring_timeseries/cbpm_viirs_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','viirs_cbpm'],index_col=0).to_xarray()
viirs_eppley=pd.read_csv('processed/npp_mooring_timeseries/eppley_viirs_nc_'+mooring_name+'.csv',skiprows=1,names=['Date','viirs_eppley'],index_col=0).to_xarray()
#Chl Models
modis_tpca=pd.read_csv('processed/npp_mooring_timeseries/modis_chl_tpca_'+mooring_name+'.csv',skiprows=1,names=['Date','mod_tpca'],index_col=0).to_xarray()
seawifs_tpca=pd.read_csv('processed/npp_mooring_timeseries/seawifs_chl_tpca_'+mooring_name+'.csv',skiprows=1,names=['Date','sw_tpca'],index_col=0).to_xarray()
viirs_chlor_a=pd.read_csv('processed/npp_mooring_timeseries/viirs_chlor_a_'+mooring_name+'.csv',skiprows=1,names=['Date','viirs_chlora'],index_col=0).to_xarray()
modis_chlor_a=pd.read_csv('processed/npp_mooring_timeseries/modis_chlor_a_'+mooring_name+'.csv',skiprows=1,names=['Date','modis_chlora'],index_col=0).to_xarray()
seawifs_chlor_a=pd.read_csv('processed/npp_mooring_timeseries/seawifs_chlor_a_'+mooring_name+'.csv',skiprows=1,names=['Date','seawifs_chlora'],index_col=0).to_xarray()
meris_chlor_a=pd.read_csv('processed/npp_mooring_timeseries/meris_chlor_a_'+mooring_name+'.csv',skiprows=1,names=['Date','meris_chlora'],index_col=0).to_xarray()
combined_flux=xr.merge([mod_vgpm,mod_cbpm,mod_eppley,mod_cafe,
sw_vgpm,sw_cbpm,sw_eppley,sw_cafe,
viirs_vgpm,viirs_cbpm,viirs_eppley,
modis_tpca,seawifs_tpca,
viirs_chlor_a,modis_chlor_a,seawifs_chlor_a,meris_chlor_a])
combined_flux=combined_flux.assign_coords(Mooring=mooring_name)
#combined_chl=combined_chl.assign_coords(Mooring=mooring_name)
flux_holder.append(combined_flux)
#chl_holder.append(combined_chl)
flux=xr.concat(flux_holder,dim='Mooring')
flux['Date']=flux.Date.astype('datetime64[D]')
flux=flux.resample(Date='M').mean()
flux['Date']=flux.Date.astype('datetime64[M]')
flux=flux.rename({'sw_vgbm':'sw_vgpm'})
try:
os.remove('processed/flux/npp.nc')
except:
pass
flux.to_netcdf('processed/flux/npp.nc',engine='h5netcdf')
def add_cafe_and_sst(fp='processed/combined_dataset/month_data_exports.nc'):
#Bit of a hacky way to modify our data netcdf with the sw and cafe product.
dat=xr.open_mfdataset(fp)
npp=xr.open_mfdataset('processed/flux/npp.nc')
dat['Date']=dat['Date'].astype('datetime64[M]')
npp['Date']=npp['Date'].astype('datetime64[M]')
npp['Mooring']=dat.Mooring.values
#dat['sw_cafe']=npp.sw_cafe.T
cafe=dat[['sw_cafe','mod_cafe']]
mean = cafe.to_array(dim='new').mean('new')
dat=dat.assign(cafe=mean)
buff=0.5 #in degrees.
l=0
lats=[l+buff,l-buff]
lns=[165,190,205,220,235,250]
lons=[[lns[0]-buff,lns[0]+buff],
[lns[1]-buff,lns[1]+buff],
[lns[2]-buff,lns[2]+buff],
[lns[3]-buff,lns[3]+buff],
[lns[4]-buff,lns[4]+buff],
[lns[5]-buff,lns[5]+buff]]
lonz=[110, 125, 140, 155, 170, 195]
mooring_sites=['165E','170W','155W','140W','125W','110W']
sst=['datasets/sst/sst.mnmean.nc']
sst=xr.open_mfdataset(sst)
datslice= | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import FunctionTransformer
from sktime.pipeline import Pipeline
from sktime.tests.test_pipeline import X_train, y_train, X_test, y_test
from sktime.transformers.compose import ColumnTransformer, Tabulariser, RowwiseTransformer
from sktime.datasets import load_gunpoint
# load data
X_train, y_train = load_gunpoint("TRAIN", return_X_y=True)
X_train = | pd.concat([X_train, X_train], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import pytest
from samplics.utils.formats import (
numpy_array,
array_to_dict,
dataframe_to_array,
sample_size_dict,
dict_to_dataframe,
sample_units,
convert_numbers_to_dicts,
)
df = | pd.DataFrame({"one": [1, 2, 2, 3, 0], "two": [4, 9, 5, 6, 6]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 18:41:43 2020
@author: Cliente
"""
import pandas as pd
import statistics
from imblearn.pipeline import make_pipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss,RandomUnderSampler
import glob
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
from timeit import default_timer as timer
start = timer()
path_files = '../data_input/Data_split'
cutoff = 10
number_of_samples = 50 # Number of SMOTE samples
def smote_data(mineral_file,cutoff,log_file,number_of_samples=15):
mineral = pd.read_csv(mineral_file,index_col='MINERAL')
# Verify cutoff condition
min_list = mineral.index.unique()
pass_minerals = []
cut_minerals = []
for var in min_list:
#print(len(mineral[mineral.index == var]))
if len(mineral[mineral.index == var]) >= cutoff:
#pass_minerals.append(len(mineral[mineral['MINERAL'] == var]))
pass_minerals.append(var)
else:
#cut_minerals.append(len(mineral[mineral['MINERAL'] == var]))
cut_minerals.append(var)
# remove minerals bellow cutoff
mineral.drop(cut_minerals, inplace=True)
count_min = []
for var in pass_minerals:
count_min.append(mineral.loc[var,:].shape[0])
#### RUN SMOTE TO BALANCE THE CLASSES ####
# test if the data has a minimum of 2 classes to balance
if mineral.index.unique().shape[0] > 1:
        ############ oversampling and downsampling based on the median (or a fixed value)
#median_value = int(statistics.median(count_min))
median_value = number_of_samples
#cols = mineral.columns.to_list()[5:]
cols = ['SIO2', 'TIO2', 'AL2O3', 'CR2O3', 'FEOT', 'CAO', 'MGO', 'MNO', 'K2O', 'NA2O', 'P2O5', 'H20',
'F', 'CL', 'NIO', 'CUO', 'COO', 'ZNO', 'PBO', 'S', 'ZRO2', 'AS']
X = mineral[cols].iloc[:,:].values
y = mineral.index.values
down_keys = []
up_keys = []
for var in pass_minerals:
if mineral.loc[var,:].shape[0] > median_value:
down_keys.append(var)
#downsample = {var : median_value}
            elif mineral.loc[var,:].shape[0] < median_value:
                up_keys.append(var)
                #upsample= {var : median_value}
            # classes already at median_value need neither over- nor under-sampling
downsample_dict={}
upsample_dict = {}
for i in down_keys:
downsample_dict[i] = median_value
for i in up_keys:
upsample_dict[i] = median_value
#pipe = make_pipeline(SMOTE(sampling_strategy=upsample_dict),NearMiss(sampling_strategy=downsample_dict,version=1)) ## Near Miss
pipe = make_pipeline(SMOTE(sampling_strategy=upsample_dict,k_neighbors=3),RandomUnderSampler(sampling_strategy=downsample_dict))
X_smt, y_smt = pipe.fit_resample(X, y)
X_new = pd.DataFrame(X_smt, columns=cols)
X_new['MINERAL'] = y_smt
X_new['GROUP'] = mineral.GROUP.unique()[0]
#for var in pass_minerals:
#print(len(X_new[X_new['MINERAL'] == var]))
print("\n\nprocessing group: %s" % mineral.GROUP.unique()[0])
print("%i of %i minerals removed: " % (len(cut_minerals),len(min_list)))
print("accepted: %s" % pass_minerals)
print('rejected: %s' % cut_minerals)
print("median value: %i" % median_value)
print('sample size: %i\n\n' % len(X_new[X_new['MINERAL'] == pass_minerals[0]] ))
log_file.write("processing group: %s\n" % mineral.GROUP.unique()[0])
log_file.write("%i of %i minerals removed: \n" % (len(cut_minerals),len(min_list)))
log_file.write("accepted: %s\n" % pass_minerals)
log_file.write('rejected: %s\n' % cut_minerals)
log_file.write("median value: %i\n" % median_value)
log_file.write('sample size: %i\n\n' % len(X_new[X_new['MINERAL'] == pass_minerals[0]] ))
        ## Test accuracy before SMOTE (original X, y)
        test_acc(X,y,log_file)
        ## Test accuracy after SMOTE (resampled X_new)
X_final = X_new[cols].iloc[:,:].values
y_final = X_new['MINERAL'].iloc[:].values
test_acc(X_final,y_final,log_file)
return X_new
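# Illustrative sketch (defined but never called): a minimal version of the
# resampling strategy used in smote_data() above, where one class is oversampled
# with SMOTE and another undersampled to the same target count. The toy arrays,
# class labels and target_count below are made up purely for illustration.
def _smote_pipeline_sketch(target_count=6):
    import numpy as np
    X_toy = np.vstack([np.random.rand(4, 3), np.random.rand(12, 3) + 1.0])
    y_toy = np.array(['rare'] * 4 + ['common'] * 12)
    pipe = make_pipeline(SMOTE(sampling_strategy={'rare': target_count}, k_neighbors=3),
                         RandomUnderSampler(sampling_strategy={'common': target_count}))
    X_res, y_res = pipe.fit_resample(X_toy, y_toy)
    return X_res, y_res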
def test_acc(X,y,log_file):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
model = RandomForestClassifier(n_estimators=50,max_features='sqrt',n_jobs=-1, oob_score=True)
model.fit(X_train, y_train.ravel())
predictions = model.predict(X_test)
# print classification report
print(classification_report(y_test, predictions))
log_file.write(classification_report(y_test, predictions))
log_file = open("SMOTE_log_Random_Sampler.txt", "w")
mineral_file = "../data_input/Data_split/AMPHIBOLES_rf.csv"
df_main = smote_data(mineral_file,cutoff,log_file,number_of_samples)
input_files = glob.glob(path_files+'\*')
for files in input_files[1:]:
df = smote_data(files,cutoff,log_file,number_of_samples)
df_main = | pd.concat([df_main, df]) | pandas.concat |
import random
import requests
import pandas as pd
from xgboost import XGBClassifier
URL = "https://aydanomachado.com/mlclass/03_Validation.php"
DEV_KEY = "Café com leite"
def replace_sex(data):
data[['sex']] = data[['sex']].replace(
{'I': 0, 'F': 1, 'M': 2}).astype(int)
return data
train = pd.read_csv("abalone_dataset.csv")
test = pd.read_csv("abalone_app.csv")
train = replace_sex(train)
test = replace_sex(test)
X = train.drop("type", inplace=False, axis=1)
Y = train.type
xg = XGBClassifier(booster="gbtree", learning_rate=0.2, min_split_loss=0,
reg_lambda=1, reg_alpha=0, tree_method="exact", silent=False, verbosity=3)
xg.fit(X, Y)
y_pred = list(xg.predict(test))
y_pred[-2] = 1
print(y_pred)
data = {'dev_key': DEV_KEY,
'predictions': | pd.Series(y_pred) | pandas.Series |
import os
import pandas as pd
import gluonts
import numpy as np
import argparse
import json
import pathlib
from mxnet import gpu, cpu
from mxnet.context import num_gpus
import matplotlib.pyplot as plt
from gluonts.dataset.util import to_pandas
from gluonts.mx.distribution import DistributionOutput, StudentTOutput, NegativeBinomialOutput, GaussianOutput
from gluonts.model.deepar import DeepAREstimator
from gluonts.mx.trainer import Trainer
from gluonts.evaluation import Evaluator
from gluonts.evaluation.backtest import make_evaluation_predictions, backtest_metrics
from gluonts.model.predictor import Predictor
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.common import ListDataset
def model_fn(model_dir):
path = pathlib.Path(model_dir)
predictor = Predictor.deserialize(path)
print("model was loaded successfully")
return predictor
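# Note: the model_fn/transform_fn pair above appears to follow the Amazon
# SageMaker MXNet inference-handler convention: model_fn deserializes the saved
# GluonTS Predictor, and transform_fn parses the JSON request payload into the
# DataFrames used to build the prediction input. These hooks are invoked at
# deployment/inference time rather than during training.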
def transform_fn(model, request_body, content_type='application/json', accept_type='application/json'):
related_cols = ['holiday', 'temp', 'rain_1h', 'snow_1h', 'clouds_all', 'weather_main', 'weather_description']
FREQ = 'H'
pred_length = 24*7
data = json.loads(request_body)
target_test_df = pd.DataFrame(data['target_values'], index=data['timestamp'])
related_test_df = | pd.DataFrame(data['related_values'], index=data['timestamp']) | pandas.DataFrame |
"""
Procedures needed for Common support estimation.
Created on Thu Dec 8 15:48:57 2020.
@author: MLechner
# -*- coding: utf-8 -*-
"""
import copy
import numpy as np
import pandas as pd
from mcf import mcf_data_functions as mcf_data
from mcf import general_purpose as gp
from mcf import general_purpose_estimation as gp_est
def common_support(predict_file, tree_file, fill_y_file, fs_file, var_x_type,
v_dict, c_dict, cs_list=None, prime_values_dict=None,
pred_tr_np=None, d_tr_np=None):
"""
Remove observations from data files that are off-support.
Parameters
----------
predict_file : String of csv-file. Data to predict the RF.
train_file : String of csv-file. Data to train the RF.
fill_y_file : String of csv-file. Data with y to be used by RF.
fs_file : String of csv-file. Data with y to be used by RF.
var_x_type : Dict. Features.
v_dict : Dict. Variables.
c_dict : Dict. Parameters.
cs_list: Tuple. Contains the information from estimated propensity score
needed to predict for other data. Default is None.
prime_values_dict: Dict. List of unique values for variables to dummy.
Default is None.
pred_t: Numpy array. Predicted treatment probabilities in training data.
Needed to define cut-offs.
d_train: Numpy series. Observed treatment in training data (tree_file).
Returns
-------
predict_file_new : String of csv-file. Adjusted data.
cs_list: Tuple. Contains the information from estimated propensity score
needed to predict for other data.
pred_t: Numpy array. Predicted treatment probabilities in training data.
d_train_tree: estimated tree by sklearn.
"""
def r2_obb(c_dict, idx, oob_best):
if c_dict['with_output']:
print('\n')
print('-' * 80)
print('Treatment: {:2}'.format(c_dict['d_values'][idx]),
'OOB Score (R2 in %): {:6.3f}'.format(oob_best * 100))
print('-' * 80)
def get_data(file_name, x_name):
data = pd.read_csv(file_name)
x_all = data[x_name] # deep copies
obs = len(x_all.index)
return data, x_all, obs
def check_cols(x_1, x_2, name1, name2):
var1 = set(x_1.columns)
var2 = set(x_2.columns)
if var1 != var2:
if len(var1-var2) > 0:
print('Variables in ', name1, 'not contained in ', name2,
*(var1-var2))
if len(var2-var1) > 0:
print('Variables in ', name2, 'not contained in ', name1,
*(var2-var1))
raise Exception(name1 + ' data and ' + name2 + ' data contain' +
                            ' different variables. Programme stopped.')
def mean_by_treatment(treat_pd, data_pd):
treat_pd = treat_pd.squeeze()
treat_vals = pd.unique(treat_pd)
print('--------------- Mean by treatment status ------------------')
        if len(treat_vals) > 1:
mean = data_pd.groupby(treat_pd).mean()
print(mean.transpose())
else:
print('All obs have same treatment:', treat_vals)
def on_support_data_and_stats(obs_to_del_np, data_pd, x_data_pd, out_file,
upper_l, lower_l, c_dict, header=False,
d_name=None):
obs_to_keep = np.invert(obs_to_del_np)
data_keep = data_pd[obs_to_keep]
gp.delete_file_if_exists(out_file)
data_keep.to_csv(out_file, index=False)
if c_dict['with_output']:
x_keep = x_data_pd[obs_to_keep]
x_delete = x_data_pd[obs_to_del_np]
if header:
print('\n')
print('=' * 80)
print('Common support check')
print('-' * 80)
print('Upper limits on treatment probabilities: ', upper_l)
print('Lower limits on treatment probabilities: ', lower_l)
print('-' * 80)
print('Data investigated and saved:', out_file)
print('-' * 80)
print('Observations deleted: {:4}'.format(np.sum(obs_to_del_np)),
' ({:6.3f}%)'.format(np.mean(obs_to_del_np)*100))
with pd.option_context(
'display.max_rows', 500,
'display.max_columns', 500,
'display.expand_frame_repr', True,
'display.width', 150,
'chop_threshold', 1e-13):
all_var_names = [name.upper() for name in data_pd.columns]
if d_name[0].upper() in all_var_names:
d_keep = data_keep[d_name]
d_delete = data_pd[d_name]
d_delete = d_delete[obs_to_del_np]
d_keep_count = d_keep.value_counts(sort=False)
d_delete_count = d_delete.value_counts(sort=False)
d_keep_count = pd.concat(
[d_keep_count,
d_keep_count / np.sum(obs_to_keep) * 100], axis=1)
d_delete_count = pd.concat(
[d_delete_count,
d_delete_count / np.sum(obs_to_del_np) * 100], axis=1)
d_keep_count.columns = ['Obs.', 'Share in %']
d_delete_count.columns = ['Obs.', 'Share in %']
if c_dict['panel_data']:
cluster_id = data_pd[v_dict['cluster_name']].squeeze()
cluster_keep = cluster_id[obs_to_keep].squeeze()
cluster_delete = cluster_id[obs_to_del_np].squeeze()
print('-' * 80)
print('Observations kept by treatment')
print(d_keep_count)
print('- ' * 20)
print('Observations deleted by treatment')
print(d_delete_count)
if c_dict['panel_data']:
print('- ' * 20)
print('Total number of panel unit:',
len(cluster_id.unique()))
print('Observations belonging to ',
len(cluster_keep.unique()),
'panel units are ON support')
print('Observations belonging to ',
len(cluster_delete.unique()),
'panel units are OFF support')
if d_name[0].upper() in all_var_names:
print()
print('Full sample (ON and OFF support observations)')
mean_by_treatment(data_pd[d_name], x_data_pd)
print('-' * 80)
print('Data ON support')
print('-' * 80)
print(x_keep.describe().transpose())
if d_name[0].upper() in all_var_names:
print()
mean_by_treatment(d_keep, x_keep)
print('-' * 80)
print('Data OFF support')
print('-' * 80)
print(x_delete.describe().transpose())
if d_name[0].upper() in all_var_names:
print()
if np.sum(obs_to_del_np) > 1:
mean_by_treatment(d_delete, x_delete)
else:
print('Only single observation deleted.')
if np.mean(obs_to_del_np) > c_dict['support_max_del_train']:
raise Exception(
'Less than {:3}%'.format(
100-c_dict['support_max_del_train']*100)
+ ' observations left after common support check of'
+ ' training data. Programme terminated. Improve'
+ ' balance of input data for forest building.')
x_name, x_type = gp.get_key_values_in_list(var_x_type)
    names_unordered = []  # collect unordered (categorical) variables to split into dummies
for j, val in enumerate(x_type):
if val > 0:
names_unordered.append(x_name[j])
fs_adjust = False
obs_fs = 0
if c_dict['train_mcf']:
data_tr, x_tr, obs_tr = get_data(tree_file, x_name) # train,adj.
data_fy, x_fy, obs_fy = get_data(fill_y_file, x_name) # adj.
if c_dict['fs_yes']:
# if not ((fs_file == tree_file) or (fs_file == fill_y_file)):
if fs_file not in (tree_file, fill_y_file):
data_fs, x_fs, obs_fs = get_data(fs_file, x_name) # adj.
fs_adjust = True
if c_dict['pred_mcf']:
data_pr, x_pr, obs_pr = get_data(predict_file, x_name)
else:
obs_pr = 0
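    # The block below one-hot encodes the unordered (categorical) features. All
    # available data files are concatenated first so that pd.get_dummies yields
    # an identical set of dummy columns for every file; the individual files are
    # then recovered by slicing on their original row counts (obs_tr, obs_fy,
    # obs_pr).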
if names_unordered: # List is not empty
if c_dict['train_mcf'] and c_dict['pred_mcf']:
x_total = pd.concat([x_tr, x_fy, x_pr], axis=0)
if fs_adjust:
x_total = pd.concat([x_total, x_fs], axis=0)
x_dummies = pd.get_dummies(x_total[names_unordered],
columns=names_unordered)
x_total = pd.concat([x_total, x_dummies], axis=1)
x_tr = x_total[:obs_tr]
x_fy = x_total[obs_tr:obs_tr+obs_fy]
x_pr = x_total[obs_tr+obs_fy:obs_tr+obs_fy+obs_pr]
if fs_adjust:
x_fs = x_total[obs_tr+obs_fy+obs_pr:]
elif c_dict['train_mcf'] and not c_dict['pred_mcf']:
x_total = pd.concat([x_tr, x_fy], axis=0)
if fs_adjust:
x_total = pd.concat([x_total, x_fs], axis=0)
x_dummies = pd.get_dummies(x_total[names_unordered],
columns=names_unordered)
x_total = | pd.concat([x_total, x_dummies], axis=1) | pandas.concat |
"""
A set of functions needed for DataCarousel app
"""
import random
import logging
import json
import time
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
import urllib.request as urllibr
from urllib.error import HTTPError
import cx_Oracle
from django.core.cache import cache
from django.utils.six.moves import cPickle as pickle
from django.db import connection
from core.settings.base import DATA_CAROUSEL_MAIL_REPEAT
from core.settings.local import dbaccess
from core.reports.sendMail import send_mail_bp
from core.reports.models import ReportEmails
from core.views import setupView
from core.libs.exlib import dictfetchall
from core.schedresource.utils import getCRICSEs
_logger = logging.getLogger('bigpandamon')
BASE_STAGE_INFO_URL = 'https://bigpanda.cern.ch/staginprogress/?jeditaskid='
#BASE_STAGE_INFO_URL = 'http://aipanda163.cern.ch:8000/staginprogress/?jeditaskid='
def getBinnedData(listData, additionalList1 = None, additionalList2 = None):
isTimeNotDelta = True
timesadd1 = None
timesadd2 = None
try:
times = pd.to_datetime(listData)
if additionalList1:
timesadd1 = pd.to_datetime(additionalList1)
if additionalList2:
timesadd2 = pd.to_datetime(additionalList2)
except:
times = pd.to_timedelta(listData)
isTimeNotDelta = False
if additionalList1:
timesadd1 = pd.to_timedelta(additionalList1)
if additionalList2:
timesadd2 = pd.to_timedelta(additionalList2)
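    # Fallback above: pd.to_datetime() raises for duration-like values, in which
    # case they are parsed as offsets with pd.to_timedelta() instead. Either way
    # each record contributes a count of 1 indexed by its time value, so the
    # frames built below can be binned/resampled by time downstream.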
df = pd.DataFrame({
"Count1": [1 for _ in listData]
}, index=times)
if not timesadd1 is None:
dfadd = pd.DataFrame({
"Count2": [1 for _ in additionalList1]
}, index=timesadd1)
result = pd.concat([df, dfadd])
else:
result = df
if not timesadd2 is None:
dfadd = pd.DataFrame({
"Count3": [1 for _ in additionalList2]
}, index=timesadd2)
result = | pd.concat([result, dfadd]) | pandas.concat |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
| pd.util.testing.assert_frame_equal(obs, sources) | pandas.util.testing.assert_frame_equal |
# streamlit run ./files.py/old_testing.py
import streamlit as st
import yfinance as yf
import pandas as pd
import numpy as np
import math
# programmatic calculations
import get12Data as g12d
import getAnalytics as gAna
def colorHeader(fontcolor = '#33ff33', fontsze = 30, msg="Enter some Text"):
st.markdown(f'<h1 style="color:{fontcolor};font-size:{fontsze}px;">{msg}</h1>', unsafe_allow_html=True)
def analytics(df):
df = addColumns(df)
df = addVolatile(df)
return df
def entryCond(s):
if (s['Close'] > s['RollMax'] ):
return 'YES'
elif (s['Close'] < s['RollMax'] ):
return 'NO'
else:
return 'NEUTRAL'
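# entryCond() above encodes a simple breakout rule: RollMax (added in
# addColumns() below) is the highest High of the preceding 13 bars, with
# shift(1) excluding the current bar, so a Close above that prior high returns
# 'YES', a Close below it returns 'NO', and an exact tie falls through to
# 'NEUTRAL'.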
def addColumns(df):
new_df = df.copy(deep=True)
new_df['RollMax'] = new_df['High'].rolling(13, min_periods=1).max().shift(1)
new_df['RollMax'] = new_df['RollMax'].replace(np.nan, 0)
# make sure type of data in columns are ok
new_df['datetime'] = pd.to_datetime(df['datetime'])
new_df["Open"] = pd.to_numeric(new_df["Open"])
new_df["High"] = | pd.to_numeric(new_df["High"]) | pandas.to_numeric |
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
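    # Pattern used above (and again below): because _build_matches is a bound
    # method of this specific StringGrouper instance, the wrapper captures the
    # real implementation through the default argument
    # real_build_matches=sg._build_matches, and the attribute is then replaced
    # with Mock(side_effect=wrapper). The Mock records call counts while the
    # wrapper keeps the real behaviour, except that it raises OverflowError once
    # the combined block size exceeds the chosen OverflowThreshold, which is what
    # forces StringGrouper's automatic block splitting.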
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
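    # Illustrative helper (not used by any test): the symmetry check above treats
    # _matches_list as a sparse adjacency list. Renaming the columns of the
    # lower-triangular part acts as a transpose, and an inner merge with the
    # upper-triangular part recovers the matches present in both orientations.
    # The toy matches list below is made up purely for illustration.
    @staticmethod
    def _sketch_symmetry_check(matches_list=None):
        if matches_list is None:
            matches_list = pd.DataFrame(
                {'master_side': [0, 1, 1, 2], 'dupe_side': [1, 0, 2, 1]})
        upper = matches_list[matches_list['master_side'] < matches_list['dupe_side']]
        lower = matches_list[matches_list['master_side'] > matches_list['dupe_side']]
        upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
        intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
        return intersection.empty or (len(upper) == len(upper_prime) == len(intersection))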
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['<NAME>']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = | pd.Series(['FOO', 'bar', 'bop']) | pandas.Series |
# Based on SMS_Spam_Detection
# edited to run on local PC without GPU setup
import io
import re
import stanza
import pandas as pd
import tensorflow as tf
import stopwordsiso as stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from gensim.models.word2vec import Word2Vec
import gensim.downloader as api
print("TensorFlow Version: " + tf.__version__)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nDownload Data\n - do this from notebook code")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nTest data reading:")
lines = io.open('data/SMSSpamCollection').read().strip().split('\n')
print(lines[0])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nPre-Process Data")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
spam_dataset = []
count = 0
for line in lines:
label, text = line.split('\t')
if label.lower().strip() == 'spam':
spam_dataset.append((1, text.strip()))
count += 1
else:
        spam_dataset.append((0, text.strip()))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nprint(spam_dataset[0])")
print(spam_dataset[0])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+'\n\nprint("Spam: ", count)')
print("Spam: ", count)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nData Normalization")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
df = pd.DataFrame(spam_dataset, columns=['Spam', 'Message'])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Normalization functions
def message_length(x):
# returns total number of characters
return len(x)
def num_capitals(x):
_, count = re.subn(r'[A-Z]', '', x) # only works in english
return count
def num_punctuation(x):
_, count = re.subn(r'\W', '', x)
return count
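# Quick self-check (defined but never called) illustrating what the three
# normalization features above produce; the sample string is made up purely for
# illustration.
def _normalization_example(sample="Free MONEY!!"):
    assert num_capitals(sample) == 6     # 'F', 'M', 'O', 'N', 'E', 'Y'
    assert num_punctuation(sample) == 3  # the space and the two '!'
    assert message_length(sample) == 12  # total characters, incl. whitespace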
df['Capitals'] = df['Message'].apply(num_capitals)
df['Punctuation'] = df['Message'].apply(num_punctuation)
df['Length'] = df['Message'].apply(message_length)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nCorpus:")
print(df.describe())
train = df.sample(frac=0.8,random_state=42) #random state is a seed value
test = df.drop(train.index)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nTrain:")
print(train.describe())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nTest:")
print(test.describe())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nModel Building")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Basic 1-layer neural network model for evaluation
def make_model(input_dims=3, num_units=12):
model = tf.keras.Sequential()
# Adds a densely-connected layer with 12 units to the model:
model.add(tf.keras.layers.Dense(num_units,
input_dim=input_dims,
activation='relu'))
# Add a sigmoid layer with a binary output unit:
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
x_train = train[['Length', 'Punctuation', 'Capitals']]
y_train = train[['Spam']]
x_test = test[['Length', 'Punctuation', 'Capitals']]
y_test = test[['Spam']]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nx_train:")
print(x_train)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80 * "~") + "\n\nmodel = make_model():")
model = make_model()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nmodel.fit(x_train, y_train, epochs=10, batch_size=10)")
model.fit(x_train, y_train, epochs=10, batch_size=10)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nmodel.evaluation(x_test, y_test)")
model.evaluate(x_test, y_test)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\ny_train_pred = model.predict_classes(x_train)")
y_train_pred = model.predict_classes(x_train)
#print((80*"~")+"\n\ny_train_pred = np.argmax(model.predict(x_train), axis=-1)")
#y_train_pred: object = np.argmax(model.predict(x_train), axis=-1)
# confusion matrix
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\ntf.math.confusion_matrix(tf.constant(y_train.Spam), y_train_pred)")
print(tf.math.confusion_matrix(tf.constant(y_train.Spam), y_train_pred))
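# In the matrix printed above, rows correspond to the true labels (0 = ham,
# 1 = spam) and columns to the predicted labels, so the off-diagonal entries
# count the misclassified messages.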
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nsum(y_train_pred)")
print(sum(y_train_pred))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\ny_test_pred = model.predict_classes(x_test)")
y_test_pred = model.predict_classes(x_test)
#print((80*"~")+"\n\ny_train_pred = np.argmax(model.predict(x_test), axis=-1)")
#y_test_pred = np.argmax(model.predict(x_test), axis=-1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\ntf.math.confusion_matrix(tf.constant(y_test.Spam), y_test_pred)")
print(tf.math.confusion_matrix(tf.constant(y_test.Spam), y_test_pred))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nTokenization and Stop Word Removal"+(80*"~"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sentence = 'Go until jurong point, crazy.. Available only in bugis n great world'
sentence.split()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nen = stanza.download('en')")
en = stanza.download('en')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80 * "~") + "\n\nen = stanza.Pipeline(lang='en')")
en = stanza.Pipeline(lang='en')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nprint(sentence)")
print(sentence)
tokenized = en(sentence)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nprint(len(tokenized.sentences))")
print(len(tokenized.sentences))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nprint(<End of Sentence>)")
for snt in tokenized.sentences:
for word in snt.tokens:
print(word.text)
print("<End of Sentence>")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nDependency Parsing Example\n"+(80*"~"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nen2 = stanza.Pipeline(lang='en')")
en2 = stanza.Pipeline(lang='en')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nprint(<End of Sentence>)")
pr2 = en2("Hari went to school")
for snt in pr2.sentences:
for word in snt.tokens:
print(word)
print("<End of Sentence>")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nJapanese Tokenization Example"+(80*"~"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\njp = stanza.download('ja')")
jp = stanza.download('ja')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\njp = stanza.Pipeline(lang='ja')")
jp = stanza.Pipeline(lang='ja')
jp_line = jp("選挙管理委員会")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nsnt.tokens")
for snt in jp_line.sentences:
for word in snt.tokens:
print(word.text)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nAdding Word Count Feature"+(80*"~"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def word_counts(x, pipeline=en):
doc = pipeline(x)
count = sum( [ len(sentence.tokens) for sentence in doc.sentences] )
return count
#en = snlp.Pipeline(lang='en', processors='tokenize')
df['Words'] = df['Message'].apply(word_counts)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nCorpus: (Words added)")
print(df.describe())
#train=df.sample(frac=0.8,random_state=42) #random state is a seed value
#test=df.drop(train.index)
train['Words'] = train['Message'].apply(word_counts)
test['Words'] = test['Message'].apply(word_counts)
x_train = train[['Length', 'Punctuation', 'Capitals', 'Words']]
y_train = train[['Spam']]
x_test = test[['Length', 'Punctuation', 'Capitals' , 'Words']]
y_test = test[['Spam']]
model = make_model(input_dims=4)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nmodel.fit(x_train, y_train, epochs=10, batch_size=10)")
model.fit(x_train, y_train, epochs=10, batch_size=10)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nmodel.evaluate(x_test, y_test)")
model.evaluate(x_test, y_test)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nStop Word Removal")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nprint(stopwords.langs())")
print(stopwords.langs())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nprint(sorted(stopwords.stopwords('en')))")
print(sorted(stopwords.stopwords('en')))
en_sw = stopwords.stopwords('en')
def word_counts(x, pipeline=en):
doc = pipeline(x)
count = 0
for sentence in doc.sentences:
for token in sentence.tokens:
if token.text.lower() not in en_sw:
count += 1
return count
train['Words'] = train['Message'].apply(word_counts)
test['Words'] = test['Message'].apply(word_counts)
x_train = train[['Length', 'Punctuation', 'Capitals', 'Words']]
y_train = train[['Spam']]
x_test = test[['Length', 'Punctuation', 'Capitals' , 'Words']]
y_test = test[['Spam']]
model = make_model(input_dims=4)
#model = make_model(input_dims=3)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nmodel.fit(x_train, y_train, epochs=10, batch_size=10)")
model.fit(x_train, y_train, epochs=10, batch_size=10)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\n"+(80*"~")+"\nPOS Based Features"+(80*"~"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~")+"\n\nen = stanza.Pipeline(lang='en')")
en = stanza.Pipeline(lang='en')
txt = "Yo you around? A friend of mine's lookin."
pos = en(txt)
def print_pos(doc):
text = ""
for sentence in doc.sentences:
for token in sentence.tokens:
text += token.words[0].text + "/" + \
token.words[0].upos + " "
text += "\n"
return text
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nprint(print_pos(pos))")
print(print_pos(pos))
en_sw = stopwords.stopwords('en')
def word_counts_v3(x, pipeline=en):
doc = pipeline(x)
count = 0
for sentence in doc.sentences:
for token in sentence.tokens:
if token.text.lower() not in en_sw and token.words[0].upos not in ['PUNCT', 'SYM']:
count += 1
return count
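# word_counts_v3 tightens the feature further: besides the stop-word filter it
# skips tokens whose universal POS tag is PUNCT or SYM, so punctuation and
# symbols no longer inflate the count.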
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nprint(word_counts(txt), word_counts_v3(txt))")
print(word_counts(txt), word_counts_v3(txt))
train['Test'] = 0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nprint(train.describe())")
print(train.describe())
def word_counts_v3(x, pipeline=en):
doc = pipeline(x)
totals = 0.
count = 0.
non_word = 0.
for sentence in doc.sentences:
totals += len(sentence.tokens) # (1)
for token in sentence.tokens:
if token.text.lower() not in en_sw:
if token.words[0].upos not in ['PUNCT', 'SYM']:
count += 1.
else:
non_word += 1.
non_word = non_word / totals
return pd.Series([count, non_word], index=['Words_NoPunct', 'Punct'])
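# Returning a pd.Series here (instead of a tuple) makes Series.apply below
# produce a DataFrame with 'Words_NoPunct' and 'Punct' columns, which is then
# joined back onto train/test with pd.concat(..., axis=1).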
x = train[:10]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\nx.describe()")
print(x.describe())
train_tmp = train['Message'].apply(word_counts_v3)
train = pd.concat([train, train_tmp], axis=1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\ntrain.describe()")
print(train.describe())
test_tmp = test['Message'].apply(word_counts_v3)
test = pd.concat([test, test_tmp], axis=1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print((80*"~") + "\n\ntest.describe()")
print(test.describe())
z = | pd.concat([x, train_tmp], axis=1) | pandas.concat |
###############################################################################
from functools import partial
from math import sqrt
from copy import deepcopy
import operator, sys
import json
import pandas as pd
import numpy as np
from scipy.io import arff
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Perceptron
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import KMeans
from prefit_voting_classifier import PrefitVotingClassifier
def load_experiment_configuration():
STRATEGY_PERCENTAGE = 0.5
N_JOBS = -1
PRUNNING_CLUSTERS = 10
config = {
"num_folds": 10,
"pool_size": 100,
"kdn": 5,
"strategy_percentage": STRATEGY_PERCENTAGE,
"validation_hardnesses": _create_validation_hardnesses(threshold = 0.5),
"base_classifier": partial(Perceptron, max_iter = 20, tol = 0.001,
penalty = None, n_jobs = N_JOBS),
"generation_strategy": partial(BaggingClassifier,
max_samples = STRATEGY_PERCENTAGE,
n_jobs = -1),
"pruning_strategies": _create_pruning_strategies(PRUNNING_CLUSTERS,
N_JOBS),
"diversity_measures": _create_diversity_measures()
}
return config
def _create_validation_hardnesses(threshold):
return [("None", partial(operator.gt, 2)),
("Hard", partial(operator.lt, threshold)),
("Easy", partial(operator.gt, threshold))]
def _create_diversity_measures():
return [partial(_disagreement_measure), partial(_double_fault_measure)]
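# Note on the partial() calls in _create_validation_hardnesses: since
# partial(operator.lt, threshold)(h) evaluates threshold < h, "Hard" keeps
# instances whose kDN hardness is above the threshold, "Easy" keeps those
# below it, and partial(operator.gt, 2) is always true for hardness values in
# [0, 1], i.e. "None" applies no filtering.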
def _create_pruning_strategies(num_clusters, n_jobs):
return [("Best First", partial(_best_first_pruning)),
("K Best Means", partial(_k_best_means_pruning, k=num_clusters,
n_jobs = n_jobs))]
def calculate_pool_diversity(measure_fn, pool, instances, gold_labels, pool_size):
if pool_size <= 1:
return 0
error_vectors = [_get_error_vector(estim, instances, gold_labels) for estim \
in pool.estimators]
summed_diversity = 0
    for i in range(pool_size-1):
        for j in range(i+1, pool_size):
matrix = _create_agreement_matrix(error_vectors[i], error_vectors[j])
summed_diversity += measure_fn(matrix)
return _average_pairs_diversity(summed_diversity, pool_size)
def _average_pairs_diversity(summed_diversity, pool_size):
return (2*summed_diversity)/(pool_size*(pool_size-1))
def _get_error_vector(clf, instances, gold_labels):
predicted = clf.predict(instances)
    return [predicted[i]==gold_labels[i] for i in range(len(gold_labels))]
def _create_agreement_matrix(di_vector, dj_vector):
d00 = _get_agreement_matrix_position(False, False, di_vector, dj_vector)
d01 = _get_agreement_matrix_position(False, True, di_vector, dj_vector)
d10 = _get_agreement_matrix_position(True, False, di_vector, dj_vector)
d11 = _get_agreement_matrix_position(True, True, di_vector, dj_vector)
return [[d00, d01], [d10, d11]]
def _get_agreement_matrix_position(err_i, err_j, vec_i, vec_j):
    xrg = range(len(vec_i))
agreement_vector = [vec_i[p] == err_i and vec_j[p] == err_j for p in xrg]
    filtered = [b for b in agreement_vector if b]
    return len(filtered)
def _disagreement_measure(agreement_matrix):
num = agreement_matrix[0][1] + agreement_matrix[1][0]
den = sum(agreement_matrix[0]) + sum(agreement_matrix[1])
return float(num)/den
def _double_fault_measure(agreement_matrix):
num = agreement_matrix[0][0]
den = sum(agreement_matrix[0]) + sum(agreement_matrix[1])
return float(num)/den
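# Agreement-matrix convention: the error vectors hold True when a classifier
# is correct, so matrix[1][1] counts instances both classifiers get right,
# matrix[0][0] counts instances both get wrong (the double fault), and the
# off-diagonal cells count the disagreements used above.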
def _find_k_neighbours(distances, k):
matrix_neighbours = []
    for i in range(len(distances)):
cur_neighbours = set()
while len(cur_neighbours) < k:
min_ix = np.argmin(distances[i])
distances[i, min_ix] = sys.float_info.max
if min_ix != i:
cur_neighbours.add(min_ix)
matrix_neighbours.append(list(cur_neighbours))
return matrix_neighbours
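# Note: the loop above overwrites visited entries of `distances` with
# sys.float_info.max while collecting the k nearest neighbours, so the matrix
# is consumed in place; pass a copy if the original distances are needed again.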
def _calculate_kdn_hardness(instances, gold_labels, k):
distances = euclidean_distances(instances, instances)
neighbours = _find_k_neighbours(distances, k)
hards = []
    for i in range(len(neighbours)):
fixed_label = gold_labels[i]
k_labels = gold_labels[neighbours[i]]
dn = sum(map(lambda label: label != fixed_label, k_labels))
hards.append(float(dn)/k)
return hards
def select_validation_set(instances, labels, operator, k):
hards = _calculate_kdn_hardness(instances, labels, k)
filtered_triples = _filter_based_hardness(instances, labels, hards, operator)
validation_instances = [t[0] for t in filtered_triples]
validation_labels = [t[1] for t in filtered_triples]
return np.array(validation_instances), validation_labels
def _filter_based_hardness(instances, labels, hards, op):
    triples = [(instances[i], labels[i], hards[i]) for i in range(len(hards))]
    return [t for t in triples if op(t[2])]
def _order_clfs(pool_clf, validation_instances, validation_labels):
clfs = pool_clf.estimators_
clfs_feats = pool_clf.estimators_features_
predictions = [clf.predict(validation_instances) for clf in clfs]
errors = [_error_score(validation_labels, predicted_labels) for predicted_labels in predictions]
    triples = [(clfs[i], clfs_feats[i], errors[i]) for i in range(len(errors))]
return sorted(triples, key=lambda t: t[2])
def _find_k_clusters(pool_clf, k, n_jobs):
clfs = pool_clf.estimators_
clfs_feats = pool_clf.estimators_features_
pool_weights = [clf.coef_[0] for clf in clfs]
k_means = KMeans(n_clusters = k, n_jobs = n_jobs)
clusters_labels = k_means.fit_predict(pool_weights)
clusters = {cluster_label: [] for cluster_label in clusters_labels}
    for i in range(len(clfs)):
cluster = clusters_labels[i]
clusters[cluster].append((clfs[i], clfs_feats[i]))
return clusters
def _find_best_per_cluster(clusters, validation_instances, validation_labels):
best_k_clf = []
best_k_feats = []
    for cluster, clfs_tuples in clusters.items():
cur_best_clf = None
cur_best_feats = None
cur_best_error = 100
for clf_tuple in clfs_tuples:
clf = clf_tuple[0]
predicted = clf.predict(validation_instances)
error = _error_score(validation_labels, predicted)
if error < cur_best_error:
cur_best_error = error
cur_best_clf = clf
cur_best_feats = clf_tuple[1]
best_k_clf.append(cur_best_clf)
best_k_feats.append(cur_best_feats)
return _get_voting_clf(best_k_clf, best_k_feats)
def _k_best_means_pruning(pool_clf, validation_instances, validation_labels, k, n_jobs):
clusters = _find_k_clusters(pool_clf, k, n_jobs)
return _find_best_per_cluster(clusters, validation_instances, validation_labels)
def _find_best_first(triples, validation_instances, validation_labels):
best_ensemble_error = 100
best_ensemble = None
cur_clfs = []
cur_feats = []
for triple in triples:
clf, clf_feat, error = triple
cur_clfs.append(clf)
cur_feats.append(clf_feat)
ensemble = _get_voting_clf(cur_clfs, cur_feats)
predicted = ensemble.predict(validation_instances)
error = _error_score(validation_labels, predicted)
if error < best_ensemble_error:
best_ensemble_error = error
best_ensemble = ensemble
return best_ensemble
def _best_first_pruning(pool_clf, validation_instances, validation_labels):
ordered_triples = _order_clfs(pool_clf, validation_instances,
validation_labels)
return _find_best_first(ordered_triples, validation_instances,
validation_labels)
def _get_voting_clf(base_clfs, clfs_feats):
pool_size = len(base_clfs)
    clfs_tuples = [(str(i), base_clfs[i]) for i in range(pool_size)]
return PrefitVotingClassifier(clfs_tuples, clfs_feats, voting = 'hard')
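# PrefitVotingClassifier (from the local prefit_voting_classifier module)
# wraps the already-fitted base classifiers, together with the feature subsets
# bagging assigned to them, into a hard-voting ensemble without refitting.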
def get_voting_pool_size(voting_pool):
return len(voting_pool.estimators)
def load_datasets_filenames():
filenames = ["cm1", "jm1"]
return filenames
def load_dataset(set_filename):
SET_PATH = "../data/"
FILETYPE = ".arff"
full_filepath = SET_PATH + set_filename + FILETYPE
data, _ = arff.loadarff(full_filepath)
dataframe = pd.DataFrame(data)
dataframe.dropna(inplace=True)
gold_labels = pd.DataFrame(dataframe["defects"])
instances = dataframe.drop(columns = "defects")
return instances, gold_labels
def save_predictions(data):
with open('../predictions/all_predictions.json', 'w') as outfile:
json.dump(data, outfile)
def load_predictions_data():
with open('../predictions/all_predictions.json', 'r') as outfile:
return json.load(outfile)
def _error_score(gold_labels, predicted_labels):
return 1 - accuracy_score(gold_labels, predicted_labels)
def _g1_score(gold_labels, predicted_labels, average):
precision = precision_score(gold_labels, predicted_labels, average=average)
recall = recall_score(gold_labels, predicted_labels, average=average)
return sqrt(precision*recall)
def _calculate_metrics(gold_labels, data):
predicted_labels = data[0]
final_pool_size = data[1]
disagreement = data[2]
double_fault = data[3]
metrics = {}
metrics["auc_roc"] = roc_auc_score(gold_labels, predicted_labels, average='macro')
metrics["g1"] = _g1_score(gold_labels, predicted_labels, average='macro')
metrics["f1"] = f1_score(gold_labels, predicted_labels, average='macro')
metrics["acc"] = accuracy_score(gold_labels, predicted_labels)
metrics["pool"] = final_pool_size
metrics["disagr"] = disagreement
metrics["2xfault"] = double_fault
return metrics
def _check_create_dict(given_dict, new_key):
if new_key not in given_dict.keys():
given_dict[new_key] = {}
def generate_metrics(predictions_dict):
metrics = {}
    for set_name, set_dict in predictions_dict.items():
        metrics[set_name] = {}
        for fold, fold_dict in set_dict.items():
            gold_labels = fold_dict["gold_labels"]
            del fold_dict["gold_labels"]
            for hardness_type, filter_dict in fold_dict.items():
                _check_create_dict(metrics[set_name], hardness_type)
                for strategy, data_arr in filter_dict.items():
metrics_str = metrics[set_name][hardness_type]
fold_metrics = _calculate_metrics(gold_labels, data_arr)
if strategy not in metrics_str.keys():
metrics_str[strategy] = [fold_metrics]
else:
metrics_str[strategy].append(fold_metrics)
return metrics
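# Resulting structure: metrics[dataset][hardness][strategy] is a list with one
# metrics dict per fold; summarize_metrics_folds below collapses each list
# into [mean, std] pairs per metric.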
def _summarize_metrics_folds(metrics_folds):
summary = {}
metric_names = metrics_folds[0].keys()
for metric_name in metric_names:
        scores = [metrics_folds[i][metric_name] for i in range(len(metrics_folds))]
summary[metric_name] = [np.mean(scores), np.std(scores)]
return summary
def summarize_metrics_folds(metrics_dict):
summary = deepcopy(metrics_dict)
    for set_name, set_dict in metrics_dict.items():
        for hardness_type, filter_dict in set_dict.items():
            for strategy, metrics_folds in filter_dict.items():
cur_summary = _summarize_metrics_folds(metrics_folds)
summary[set_name][hardness_type][strategy] = cur_summary
return summary
def pandanize_summary(summary):
df = pd.DataFrame(columns = ['set', 'hardness', 'strategy',
'mean_auc_roc', 'std_auc_roc', 'mean_acc', 'std_acc',
'mean_f1', 'std_f1', 'mean_g1', 'std_g1',
'mean_pool', 'std_pool', 'mean_2xfault',
'std_2xfault', 'mean_disagr', 'std_disagr'])
    for set_name, set_dict in summary.items():
        for hardness_type, filter_dict in set_dict.items():
            for strategy, summary_folds in filter_dict.items():
df_folds = pd.DataFrame(_unfilled_row(3, 14),
columns = df.columns)
_fill_dataframe_folds(df_folds, summary_folds, set_name,
hardness_type, strategy)
                df = pd.concat([df, df_folds])
return df.reset_index(drop = True)
def _unfilled_row(str_columns, float_columns):
    row = [" " for i in range(str_columns)]
    row.extend([0.0 for j in range(float_columns)])
return [row]
def _fill_dataframe_folds(df, summary, set_name, hardness, strategy):
df.at[0, "set"] = set_name
df.at[0, "hardness"] = hardness
df.at[0, "strategy"] = strategy
return _fill_dataframe_metrics(df, summary)
def _fill_dataframe_metrics(df, summary):
    for key, metrics in summary.items():
df.at[0, "mean_" + key] = metrics[0]
df.at[0, "std_" + key] = metrics[1]
return df
def save_pandas_summary(df):
| pd.to_pickle(df, '../metrics/metrics_summary.pkl') | pandas.to_pickle |
# coding: utf-8
# In[1]:
# import libraries
import os
import tweepy
import facebook
import requests
import datetime
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import json
import requests
# In[2]:
# configuration file
config = {}
config_path = os.path.join(os.path.abspath('../../'))
config_name = 'config.py'
config_file = os.path.join(config_path,config_name)
exec(open(config_file).read(),config)
nw_key=config['TOKEN_NW']
# In[3]:
# <NAME>
users = [
{'user':config['USER1'],'user_id':config['USER1_ID_TW']},
{'user':config['USER2'],'user_id':config['USER2_ID_TW']},
{'user':config['USER3'],'user_id':config['USER3_ID_TW']},
{'user':config['USER4'],'user_id':config['USER4_ID_TW']},
{'user':config['USER7'],'user_id':config['USER7_ID_TW']}
]
# In[4]:
# get today's date
todays_date = datetime.datetime.now()
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
str_dt = str(yesterday.date())
# In[12]:
def get_user_news(user,dt,today,key):
url = ('https://newsapi.org/v2/everything?'
'q='+user+'&'
'from='+dt+'&'
'sortBy=publishedAt&'
'language=it&'
'apiKey='+key)
response = requests.get(url)
json_data = json.loads(response.text)
articles = json_data['articles']
l_article = []
for art in articles[0:5]:
d_article = {
'user':user,
'autore':art['author'],
'desc':art['description'],
'pubAt':art['publishedAt'],
'fonte':art['source']['name'],
'titolo':art['title'],
'url':art['url'],
'img':art['urlToImage'],
'dt_rif':today
}
l_article.append(d_article)
return l_article
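# get_user_news queries NewsAPI's /v2/everything endpoint for Italian-language
# articles mentioning `user` published since `dt`, sorted by publication date,
# and keeps at most the first five results returned for that account.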
# In[13]:
l = []
for user in users:
l.append(get_user_news(user['user'],str_dt,todays_date,nw_key))
# In[21]:
df0_user = pd.DataFrame(l[0])
df1_user = pd.DataFrame(l[1])
df2_user = pd.DataFrame(l[2])
df3_user = | pd.DataFrame(l[3]) | pandas.DataFrame |
import sys
import os
import pytest
import mock
from keras.models import Sequential
from keras.layers import Dense
import sklearn.datasets as datasets
import pandas as pd
import numpy as np
import yaml
import tensorflow as tf
import mlflow
import mlflow.keras
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models import infer_signature, Model
from mlflow.models.utils import _read_example
from mlflow.utils.file_utils import TempDir
from tests.helper_functions import pyfunc_serve_and_score_model
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
pytestmark = pytest.mark.skipif(
(sys.version_info < (3, 6)), reason="Tests require Python 3 to run!"
)
@pytest.fixture(scope="module")
def data():
iris = datasets.load_iris()
data = pd.DataFrame(
data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"]
)
y = data["target"]
x = data.drop("target", axis=1)
return x, y
@pytest.fixture(scope="module")
def model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="SGD")
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def onnx_model(model):
import onnxmltools
return onnxmltools.convert_keras(model)
@pytest.fixture(scope="module")
def sklearn_model(data):
from sklearn.linear_model import LogisticRegression
x, y = data
model = LogisticRegression()
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def onnx_sklearn_model(sklearn_model):
import onnxmltools
from skl2onnx.common.data_types import FloatTensorType
initial_type = [("float_input", FloatTensorType([None, 4]))]
onx = onnxmltools.convert_sklearn(sklearn_model, initial_types=initial_type)
return onx
@pytest.fixture(scope="module")
def predicted(model, data):
return model.predict(data[0])
@pytest.fixture(scope="module")
def tf_model_multiple_inputs_float64():
graph = tf.Graph()
with graph.as_default():
t_in1 = tf.placeholder(tf.float64, 10, name="first_input")
t_in2 = tf.placeholder(tf.float64, 10, name="second_input")
t_out = tf.multiply(t_in1, t_in2)
t_out_named = tf.identity(t_out, name="output")
return graph
@pytest.fixture(scope="module")
def tf_model_multiple_inputs_float32():
graph = tf.Graph()
with graph.as_default():
t_in1 = tf.placeholder(tf.float32, 10, name="first_input")
t_in2 = tf.placeholder(tf.float32, 10, name="second_input")
t_out = tf.multiply(t_in1, t_in2)
t_out_named = tf.identity(t_out, name="output")
return graph
@pytest.fixture(scope="module")
def onnx_model_multiple_inputs_float64(tf_model_multiple_inputs_float64):
import tf2onnx
sess = tf.Session(graph=tf_model_multiple_inputs_float64)
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
sess.graph, input_names=["first_input:0", "second_input:0"], output_names=["output:0"]
)
model_proto = onnx_graph.make_model("test")
return model_proto
@pytest.fixture(scope="module")
def onnx_model_multiple_inputs_float32(tf_model_multiple_inputs_float32):
import tf2onnx
sess = tf.Session(graph=tf_model_multiple_inputs_float32)
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
sess.graph, input_names=["first_input:0", "second_input:0"], output_names=["output:0"]
)
model_proto = onnx_graph.make_model("test")
return model_proto
@pytest.fixture(scope="module")
def data_multiple_inputs():
return pd.DataFrame(
{"first_input:0": np.random.random(10), "second_input:0": np.random.random(10)}
)
@pytest.fixture(scope="module")
def predicted_multiple_inputs(data_multiple_inputs):
return pd.DataFrame(
data_multiple_inputs["first_input:0"] * data_multiple_inputs["second_input:0"]
)
@pytest.fixture
def model_path(tmpdir):
return os.path.join(tmpdir.strpath, "model")
@pytest.fixture
def onnx_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(
conda_env,
additional_conda_deps=["pytest", "keras"],
additional_pip_deps=["onnx", "onnxmltools"],
)
return conda_env
@pytest.mark.large
def test_cast_float64_to_float32():
import mlflow.onnx
df = | pd.DataFrame([[1.0, 2.1], [True, False]], columns=["col1", "col2"]) | pandas.DataFrame |
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.slow
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(10 ** 6).reshape(100, -1)
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = | pd.Timedelta(days=1) | pandas.Timedelta |
import pandas as pd
import os
from os.path import isfile, join
import time
from util import Log
class transform_data:
def __init__(self, dir_name='workspace/data/', length=10800):
self.dir_name = dir_name
self.length = length
self.df = None
self.columns = ['date', 'pair', 'change', 'buy', 'sell']
def init(self):
print('start init...')
self._load_all_old2()
print('init finish')
def get_1s(self):
try:
latest_df = pd.read_csv(self.dir_name+'latest.csv')
except:
latest_df = pd.DataFrame()
dir_name = self.dir_name+'/all/'
self.files = [f for f in os.listdir(dir_name) if isfile(join(dir_name, f))]
df = pd.DataFrame()
if 'all.csv' in self.files:
self.files.remove('all.csv')
# collect all data
for fn in self.files:
curr_df = pd.read_csv(dir_name+fn)
df = pd.concat([df, curr_df], sort=False)
df = pd.concat([df, latest_df], sort=False)
df = self._set_types(df)
try:
all_df = pd.read_csv(dir_name+'all.csv')
all_df['date'] = pd.to_datetime(all_df['date'])
df = pd.concat([all_df, df], sort=False)
del all_df
except:
pass
df = df.drop_duplicates()
df = df.sort_values(by='date')
self.df = df.reset_index(drop=True)
self.latest_time = self.df.tail(1)['date'].values[0]
self.df = self.df.loc[(self.latest_time-self.df['date']).dt.total_seconds()<self.length]
self.df[self.columns].to_csv(dir_name+'all.csv', index=False)
for fn in self.files:
os.rename(dir_name+fn, dir_name+'old/'+fn)
del df
del latest_df
return self.df
def _load_all_old(self):
dir_name = self.dir_name+'all/old/'
files = [f for f in os.listdir(dir_name) if isfile(join(dir_name, f))]
df = pd.DataFrame()
for fn in files:
curr = pd.read_csv(dir_name+fn)
df = pd.concat([df, curr], sort=False)
df = self._set_types(df)
df = df.sort_values(by='date')
self.latest_time = df.tail(1)['date'].values[0]
        self.n_pairs = len(df['pair'].unique())
return df
def _load_all_old2(self):
dir_name = self.dir_name+'all/old/'
files = [f for f in os.listdir(dir_name) if isfile(join(dir_name, f))]
df = pd.DataFrame()
df1m = pd.DataFrame()
df5m = pd.DataFrame()
df30m = pd.DataFrame()
df1h = pd.DataFrame()
pre = pd.DataFrame()
cnt = 0
files.sort()
for fn in files:
print(fn)
curr = pd.read_csv(dir_name+fn)
df = pd.concat([df, curr], sort=False)
if cnt % 10 == 9:
big_block = pd.concat([pre, df], sort=False)
big_block = self._set_types(big_block)
big_block = big_block.sort_values(by='date')
big_block = big_block.set_index('date')
n_pairs = len(big_block['pair'].unique())
                # resample the big block at each time resolution
tmp1m = big_block.groupby(['pair']).resample('1min').mean()
tmp5m = big_block.groupby(['pair']).resample('5min').mean()
tmp30m = big_block.groupby(['pair']).resample('30min').mean()
tmp1h = big_block.groupby(['pair']).resample('1H').mean()
tmp1m = tmp1m.sort_values(by='date')
tmp5m = tmp5m.sort_values(by='date')
tmp30m = tmp30m.sort_values(by='date')
tmp1h = tmp1h.sort_values(by='date')
                # drop the first and last resampled rows (partial windows at block edges)
tmp1m = tmp1m[n_pairs:-n_pairs].reset_index()
tmp5m = tmp5m[n_pairs:-n_pairs].reset_index()
tmp30m = tmp30m[n_pairs:-n_pairs].reset_index()
tmp1h = tmp1h[n_pairs:-n_pairs].reset_index()
tmp1m = tmp1m.dropna()
tmp5m = tmp5m.dropna()
tmp30m = tmp30m.dropna()
tmp1h = tmp1h.dropna()
df1m = pd.concat([df1m, tmp1m], sort=False)
df5m = pd.concat([df5m, tmp5m], sort=False)
df30m = pd.concat([df30m, tmp30m], sort=False)
df1h = pd.concat([df1h, tmp1h], sort=False)
# delete duplicates
df1m = df1m.drop_duplicates()
df5m = df5m.drop_duplicates()
df30m = df30m.drop_duplicates()
df1h = df1h.drop_duplicates()
                # cap the history length kept at each resolution
df1m = df1m[-10000*n_pairs:]
df5m = df5m[-10000*n_pairs:]
df30m = df30m[-10000*n_pairs:]
df1h = df1h[-10000*n_pairs:]
pre = df
df = pd.DataFrame()
cnt += 1
# write files
df1m[self.columns].to_csv(self.dir_name+'all/1m/all.csv', index=False)
df5m[self.columns].to_csv(self.dir_name+'all/5m/all.csv', index=False)
df30m[self.columns].to_csv(self.dir_name+'all/30m/all.csv', index=False)
df1h[self.columns].to_csv(self.dir_name+'all/1h/all.csv', index=False)
self.n_pairs = n_pairs
del df
del df1m
del df5m
del df30m
del df1h
del pre
del tmp1m
del tmp5m
del tmp30m
del tmp1h
del curr
def load_online_data(self, name, file_dir='workspace/data/', min_records=500):
source_dir = file_dir+name+'/base/'
self.restore_dir = file_dir + name +'/transformed_online/'
fns = os.listdir(source_dir)
data = []
for fn in fns:
            curr = pd.read_csv(source_dir + fn)
if len(curr) < min_records:
del curr
else:
data.append(curr)
return data
def _set_types(self, df):
if 'date' not in df.columns:
return pd.DataFrame()
df['date'] = pd.to_datetime(df['date'])
df['change'] = df['change'].str.rstrip('%').astype('float')/100.0
return df
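    # Example: a raw 'change' value of "1.25%" becomes the float 0.0125, and
    # 'date' strings are parsed to pandas Timestamps so that
    # .dt.total_seconds() arithmetic works downstream.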
def _transform(self, period, times, old):
ret = self.df.set_index(['date'])
ret = ret.groupby(['pair']).resample(period).mean()
ret = ret.reset_index()
if 'date' not in ret.columns:
print(len(ret))
print('ret columns:', ret.columns)
ret = ret.sort_values(by='date')
ret = ret[self.n_pairs: -self.n_pairs]
ret = pd.concat([old, ret], sort=False)
ret = ret.loc[(self.latest_time-ret['date']).dt.total_seconds()<self.length*times]
ret = ret.drop_duplicates()
return ret
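    # _transform downsamples the cached 1-second frame per pair to `period`,
    # drops the earliest and latest resampled rows (one per pair, likely
    # partial windows), prepends the previously stored history in `old`, and
    # keeps only the most recent `length * times` seconds of data.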
def get_1m(self):
old = pd.read_csv(self.dir_name+'all/1m/all.csv')
old['date'] = | pd.to_datetime(old['date']) | pandas.to_datetime |
import pandas as pd
import pandasql as ps
import plotly.graph_objs as go
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import Expense, ExpenseType
from .forms import (
ExpenseForm,
AuthenticationFormWithCaptchaField,
DateRangeForm
)
from django.contrib.auth import (
login, logout, authenticate)
from django.contrib import messages
from .signals import log_user_logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from rest_framework import viewsets
from .serializers import ExpenseTypeSerializer
from pretty_html_table import build_table
from django.utils import timezone
from plotly.offline import plot
# Set float values to 2 decimal places
pd.options.display.float_format = '{:,.2f}'.format
class ExpenseTypeView(viewsets.ModelViewSet):
# Class for expense type view set
# Select all expense types
queryset = ExpenseType.objects.all()
# Serialize class
serializer_class = ExpenseTypeSerializer
def index(request):
# Function renders landing page
return render(
request=request,
template_name='expense_tracking/index.html',
)
def login_request(request):
# Function for handling login request
# Verify the user is not logged into the application
if not request.user.is_authenticated:
# For user submitting the form
if request.method == "POST":
form = AuthenticationFormWithCaptchaField(
request, data=request.POST
)
# If the form is valid, present user with a success alert
            # and redirect to the expense list
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
login(request, user)
messages.success(
request,
f'{username} logged in successfully.'
)
return redirect('expense_tracking:expenses')
# Check if the users exist
elif User.objects.filter(
username=form.cleaned_data.get('username')).exists():
user = User.objects.filter(
username=form.cleaned_data.get('username')).values()
# If the user's profile is inactive, alert the user to
# contact the admin
if(user[0]['is_active'] is False):
messages.info(
request,
'Contact the administrator to activate your account!'
)
return redirect('expense_tracking:login_request')
# Present form to the user with any errors
else:
return render(
request=request,
template_name='expense_tracking/login.html',
context={'form': form}
)
# Present form to the user with any errors
else:
return render(
request=request,
template_name='expense_tracking/login.html',
context={'form': form}
)
        # When the form is NOT being submitted, present the form to the user
else:
form = AuthenticationFormWithCaptchaField()
return render(
request=request,
template_name='expense_tracking/login.html',
context={'form': form}
)
# If the user is already logged into the application, provide alert
# to inform the user and redirect her/him to the expenses list
else:
messages.info(
request,
'''You are already logged in. You must log out to log in as
another user.'''
)
# Redirect user to table of expense
return redirect('expense_tracking:expenses')
def password_reset_complete(request):
return render(
request=request,
template_name='accounts/password_reset_complete.html'
)
@login_required()
def expenses(request):
# Function requires user to be logged in and renders a table of expenses
    # showing the date, type, organization, amount and notes (with edit and
# delete buttons)
# Set up pagination
# Get 50 expenses per page, by expense date descending
p = Paginator(Expense.objects.order_by(
'-expense_date',
'expense_type__name',
'name',
'org'
), 50)
page = request.GET.get('page')
my_expenses = p.get_page(page)
distinct_expense_types = ExpenseType.objects.all()
    # Render the expense table, 50 expenses per page, with page navigation
    # at the bottom of the page
return render(request=request,
template_name='expense_tracking/expense.html',
context={
'my_expenses': my_expenses,
'distinct_expense_types': distinct_expense_types
}
)
@login_required()
def add_expense(request):
# Function for adding an expense. User must be logged in to access.
    # Obtain expense types ordered by name
expense_types = ExpenseType.objects.order_by('name')
# When the form method is post
if request.method == 'POST':
form = ExpenseForm(request.POST)
# Check if form is valid
if form.is_valid():
# If form is valid, save values to database
form.save()
# Display successful alert message
messages.success(
request,
'Expense added successfully.'
)
# Redirect user to table of expense
return redirect('expense_tracking:expenses')
# When form is invalid
else:
# Render the form, including drop down values for expense types
            # and display any form field errors at the top of the page in red
return render(
request=request,
template_name='expense_tracking/add_expense.html',
context={
'form': form,
'expense_types': expense_types
}
)
# When the form method is get
else:
# Generate expense form
form = ExpenseForm()
# Render the form, including drop down values for expense types
return render(
request=request,
template_name="expense_tracking/add_expense.html",
context={
'form': form,
'expense_types': expense_types
}
)
@login_required
def edit_expense(request, id):
    # Function for editing an expense. User must be logged in to access.
# Obtain expense record to edit by id
expense_to_edit = Expense.objects.get(id=id)
    # Obtain the list of expense types ordered by name, excluding the type
    # currently selected on the expense being edited
expense_types = ExpenseType.objects.exclude(
id=expense_to_edit.expense_type.pk
).order_by('name')
# When the form method is post
if request.method == 'POST':
form = ExpenseForm(request.POST, instance=expense_to_edit)
# Check if form is valid
if form.is_valid():
# If form is valid, save changes to database
form.save()
# Display successful alert message
messages.success(
request,
'Your expense was updated successfully!'
)
# Redirect user to table of expense
return redirect('expense_tracking:expenses')
# When the form method is get
else:
        # Generate expense form pre-populated with the expense being edited
form = ExpenseForm(instance=expense_to_edit)
# Render the form with populated data, including drop down values
# for expense types
return render(
request=request,
template_name='expense_tracking/edit_expense.html',
context={
'expense_types': expense_types,
'expense_to_edit': expense_to_edit,
'form': form
}
)
@login_required
def filter(request, id):
# Function requires user to be logged in and renders a table of expenses
    # showing the date, type, organization, amount and notes (with edit and
# delete buttons)
# Set up pagination
# Get 50 expenses per page, by expense date descending
p = Paginator(Expense.objects.filter(expense_type__id=id).order_by(
'-expense_date',
'expense_type__name',
'name',
'org'
), 50)
page = request.GET.get('page')
my_expenses = p.get_page(page)
distinct_expense_types = ExpenseType.objects.all()
    # Render a dropdown list of expense types to filter by and the expense
    # table, 50 expenses per page, with page navigation at the bottom of the
    # page
return render(request=request,
template_name='expense_tracking/expense.html',
context={
'my_expenses': my_expenses,
'distinct_expense_types': distinct_expense_types
}
)
@login_required()
def delete_expense(request, id):
# Function to delete an expense. User must be logged in to access.
    # Obtain expense to delete based on id passed from the form
expense_to_delete = Expense.objects.get(id=id)
# Delete the expense
expense_to_delete.delete()
# Display alert message stating expense date and name that was deleted.
messages.success(
request,
f'''Expense named {expense_to_delete.name} dated
{expense_to_delete.expense_date} was successfully deleted!'''
)
# Redirect to the table of expenses
return redirect('expense_tracking:expenses')
@login_required()
def logout_request(request):
# Function to log out of the application. User must be logged in to access.
logout(request)
# Log user out; display alert from log user out signal
log_user_logout()
# Redirect to the landing page
return redirect('catalog:index')
@login_required()
def get_data(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = DateRangeForm(request.POST)
# check whether it's valid:
if form.is_valid():
start = form.cleaned_data['start_date']
end = form.cleaned_data['end_date']
# String format of dates for chart
start_str = start.strftime('%m-%d-%Y')
end_str = end.strftime('%m-%d-%Y')
if ((end - start).days < 0 or (end - start).days > 366):
return render(
request=request,
template_name='expense_tracking/get_data.html',
context={
'form': form
}
)
else:
# Create dataframe from all expense records for specified
# fields
df = pd.DataFrame(list(
Expense.objects.all().values(
'expense_date',
'expense_type__name',
'name',
'org',
'amount',
'inserted_date'
)
)
)
# Max date for df
max_date = df['inserted_date'].max()
# Convert amount column values to float
df['amount'] = | pd.to_numeric(df['amount'], downcast='float') | pandas.to_numeric |
import pandas as pd
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup
from datetime import datetime
import os
import io
import sys
import tkinter as tk
x = datetime.now()
DateTimeSTR = '{}{:02d}{:02d}'.format(x.year, x.month, x.day)
def filetypesSelect(filedf, fileName, filetypesStr, check):
if 'csv' in filetypesStr:
filedf.to_csv('{}_{}.csv'.format(check, fileName), index=False, encoding='utf-8')
elif 'json' in filetypesStr:
filedf.to_json('{}_{}.json'.format(check, fileName), orient="records")
elif 'xlsx' in filetypesStr:
writer = pd.ExcelWriter('{}_{}.xlsx'.format(check, fileName), engine='xlsxwriter',
options={'strings_to_urls': False})
filedf.to_excel(writer, index=False, encoding='utf-8')
writer.close()
elif 'msgpack' in filetypesStr:
filedf.to_msgpack("{}_{}.msg".format(check, fileName), encoding='utf-8')
elif 'feather' in filetypesStr:
filedf.to_feather('{}_{}.feather'.format(check, fileName))
elif 'parquet' in filetypesStr:
filedf.to_parquet('{}_{}.parquet'.format(check, fileName), engine='pyarrow', encoding='utf-8')
elif 'pickle' in filetypesStr:
filedf.to_pickle('{}_{}.pkl'.format(check, fileName))
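# filetypesSelect writes the DataFrame out in the format named by filetypesStr
# (csv, json, xlsx, msgpack, feather, parquet or pickle), prefixing the file
# name with `check`; to_msgpack and the ExcelWriter `options` argument only
# exist in older pandas releases, so this script assumes such a version.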
def change_label_number():
strLabel = tk.Label(window, text='處理中...')
strLabel.pack(anchor='center')
window.update()
global url
global zipfileName
global comboExample
comboExampleget = fileTypeListbox.get(fileTypeListbox.curselection())
req = requests.get('https://clinicaltrials.gov/ct2/results?cond=&term=&cntry=&state=&city=&dist=')
soup = BeautifulSoup(req.text, 'html5lib')
CTDataCounts = int(''.join(list(filter(str.isdigit, soup.findAll('div', {'class': 'sr-search-terms'})[1].text))))
strLabel2 = tk.Label(window, text='Downloads Clinical Trials Data.')
strLabel2.pack(anchor='center')
window.update()
for n in tqdm(range(1, CTDataCounts // 10000 + 2 - 29), ascii=True, desc='Downloads Data -> ', ncols=69):
url = "https://clinicaltrials.gov/ct2/results/download_fields?down_count=10000&down_flds=all&down_fmt=csv&down_chunk={}".format(
n)
s = requests.get(url).content
allCTData.extend(pd.read_csv(io.StringIO(s.decode('utf-8')), encoding='utf-8').to_dict('records'))
allCTDataDF = | pd.DataFrame(allCTData) | pandas.DataFrame |
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import calendar
import time
from datetime import datetime
import pytz
from scipy import stats
from os.path import exists
# an instance of Apple Health export data
# fname is the name of the data file to be parsed; it must be an XML file
# flags for cache
class AppleHealth:
def __init__(self, fname = 'export.xml', pivotIndex = 'endDate', readCache = False, writeCache = False):
#check cache flag and cache accordingly
if readCache:
a = time.time()
self.readCache(fname)
e = time.time()
self.runtime = e-a
print("Cache parsing Time = {}".format(e-a))
if writeCache:
self.cacheAll(fname)
return
# create element tree object
a = time.time()
s = time.time()
tree = ET.parse(fname)
e = time.time()
print("Tree parsing Time = {}".format(e-s))
# for every health record, extract the attributes into a dictionary (columns). Then create a list (rows).
s = time.time()
root = tree.getroot()
record_list = [x.attrib for x in root.iter('Record')]
workout_list = [x.attrib for x in root.iter('Workout')]
e = time.time()
print("record list Time = {}".format(e-s))
# create DataFrame from a list (rows) of dictionaries (columns)
s = time.time()
self.record_data = pd.DataFrame(record_list)
self.workout_data = pd.DataFrame(workout_list)
e = time.time()
print("creating DF Time = {}".format(e-s))
format = '%Y-%m-%d %H:%M:%S'
# proper type to dates
def get_split_date(strdt):
split_date = strdt.split()
str_date = split_date[1] + ' ' + split_date[2] + ' ' + split_date[5] + ' ' + split_date[3]
return str_date
s = time.time()
for col in ['creationDate', 'startDate', 'endDate']:
self.record_data[col] = | pd.to_datetime(self.record_data[col], format=format) | pandas.to_datetime |
# pragma pylint: disable=W0603
"""
Cryptocurrency Exchanges support
"""
import asyncio
import inspect
import logging
from copy import deepcopy
from datetime import datetime, timezone
from math import ceil
from typing import Any, Dict, List, Optional, Tuple
import arrow
import ccxt
import ccxt.async_support as ccxt_async
from ccxt.base.decimal_to_precision import (ROUND_DOWN, ROUND_UP, TICK_SIZE, TRUNCATE,
decimal_to_precision)
from pandas import DataFrame
from finrl.constants import ListPairsWithTimeframes
from finrl.data.converter import ohlcv_to_dataframe, trades_dict_to_list
from finrl.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError,
InvalidOrderException, OperationalException, RetryableOrderError,
TemporaryError)
from finrl.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, BAD_EXCHANGES, retrier,
retrier_async)
from finrl.misc import deep_merge_dicts, safe_value_fallback2
CcxtModuleType = Any
logger = logging.getLogger(__name__)
class Exchange:
_config: Dict = {}
# Parameters to add directly to ccxt sync/async initialization.
_ccxt_config: Dict = {}
# Parameters to add directly to buy/sell calls (like agreeing to trading agreement)
_params: Dict = {}
# Dict to specify which options each exchange implements
# This defines defaults, which can be selectively overridden by subclasses using _ft_has
# or by specifying them in the configuration.
_ft_has_default: Dict = {
"stoploss_on_exchange": False,
"order_time_in_force": ["gtc"],
"ohlcv_candle_limit": 500,
"ohlcv_partial_candle": True,
"trades_pagination": "time", # Possible are "time" or "id"
"trades_pagination_arg": "since",
"l2_limit_range": None,
}
_ft_has: Dict = {}
def __init__(self, config: Dict[str, Any], validate: bool = True) -> None:
"""
Initializes this module with the given config,
it does basic validation whether the specified exchange and pairs are valid.
:return: None
"""
self._api: ccxt.Exchange = None
self._api_async: ccxt_async.Exchange = None
self._config.update(config)
# Holds last candle refreshed time of each pair
self._pairs_last_refresh_time: Dict[Tuple[str, str], int] = {}
# Timestamp of last markets refresh
self._last_markets_refresh: int = 0
# Holds candles
self._klines: Dict[Tuple[str, str], DataFrame] = {}
# Holds all open sell orders for dry_run
self._dry_run_open_orders: Dict[str, Any] = {}
if config['dry_run']:
logger.info('Instance is running with dry_run enabled')
logger.info(f"Using CCXT {ccxt.__version__}")
exchange_config = config['exchange']
# Deep merge ft_has with default ft_has options
self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default))
if exchange_config.get('_ft_has_params'):
self._ft_has = deep_merge_dicts(exchange_config.get('_ft_has_params'),
self._ft_has)
logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has)
# Assign this directly for easy access
self._ohlcv_candle_limit = self._ft_has['ohlcv_candle_limit']
self._ohlcv_partial_candle = self._ft_has['ohlcv_partial_candle']
self._trades_pagination = self._ft_has['trades_pagination']
self._trades_pagination_arg = self._ft_has['trades_pagination_arg']
# Initialize ccxt objects
ccxt_config = self._ccxt_config.copy()
ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}), ccxt_config)
ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_sync_config', {}), ccxt_config)
self._api = self._init_ccxt(exchange_config, ccxt_kwargs=ccxt_config)
ccxt_async_config = self._ccxt_config.copy()
ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}),
ccxt_async_config)
ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_async_config', {}),
ccxt_async_config)
self._api_async = self._init_ccxt(
exchange_config, ccxt_async, ccxt_kwargs=ccxt_async_config)
logger.info('Using Exchange "%s"', self.name)
if validate:
# Check if timeframe is available
self.validate_timeframes(config.get('timeframe'))
# Initial markets load
self._load_markets()
# Check if all pairs are available
self.validate_stakecurrency(config['stake_currency'])
if not exchange_config.get('skip_pair_validation'):
self.validate_pairs(config['exchange']['pair_whitelist'])
self.validate_ordertypes(config.get('order_types', {}))
self.validate_order_time_in_force(config.get('order_time_in_force', {}))
self.validate_required_startup_candles(config.get('startup_candle_count', 0))
# Converts the interval provided in minutes in config to seconds
self.markets_refresh_interval: int = exchange_config.get(
"markets_refresh_interval", 60) * 60
def __del__(self):
"""
Destructor - clean up async stuff
"""
logger.debug("Exchange object destroyed, closing async loop")
if self._api_async and inspect.iscoroutinefunction(self._api_async.close):
asyncio.get_event_loop().run_until_complete(self._api_async.close())
def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt,
ccxt_kwargs: dict = None) -> ccxt.Exchange:
"""
Initialize ccxt with given config and return valid
ccxt instance.
"""
# Find matching class for the given exchange name
name = exchange_config['name']
if not is_exchange_known_ccxt(name, ccxt_module):
raise OperationalException(f'Exchange {name} is not supported by ccxt')
ex_config = {
'apiKey': exchange_config.get('key'),
'secret': exchange_config.get('secret'),
'password': exchange_config.get('password'),
'uid': exchange_config.get('uid', ''),
}
if ccxt_kwargs:
logger.info('Applying additional ccxt config: %s', ccxt_kwargs)
ex_config.update(ccxt_kwargs)
try:
api = getattr(ccxt_module, name.lower())(ex_config)
except (KeyError, AttributeError) as e:
raise OperationalException(f'Exchange {name} is not supported') from e
except ccxt.BaseError as e:
raise OperationalException(f"Initialization of ccxt failed. Reason: {e}") from e
self.set_sandbox(api, exchange_config, name)
return api
@property
def name(self) -> str:
"""exchange Name (from ccxt)"""
return self._api.name
@property
def id(self) -> str:
"""exchange ccxt id"""
return self._api.id
@property
def timeframes(self) -> List[str]:
return list((self._api.timeframes or {}).keys())
@property
def ohlcv_candle_limit(self) -> int:
"""exchange ohlcv candle limit"""
return int(self._ohlcv_candle_limit)
@property
def markets(self) -> Dict:
"""exchange ccxt markets"""
if not self._api.markets:
logger.info("Markets were not loaded. Loading them now..")
self._load_markets()
return self._api.markets
@property
def precisionMode(self) -> str:
"""exchange ccxt precisionMode"""
return self._api.precisionMode
def get_markets(self, base_currencies: List[str] = None, quote_currencies: List[str] = None,
pairs_only: bool = False, active_only: bool = False) -> Dict:
"""
Return exchange ccxt markets, filtered out by base currency and quote currency
if this was requested in parameters.
TODO: consider moving it to the Dataprovider
"""
markets = self.markets
if not markets:
raise OperationalException("Markets were not loaded.")
if base_currencies:
markets = {k: v for k, v in markets.items() if v['base'] in base_currencies}
if quote_currencies:
markets = {k: v for k, v in markets.items() if v['quote'] in quote_currencies}
if pairs_only:
markets = {k: v for k, v in markets.items() if self.market_is_tradable(v)}
if active_only:
markets = {k: v for k, v in markets.items() if market_is_active(v)}
return markets
def get_quote_currencies(self) -> List[str]:
"""
Return a list of supported quote currencies
"""
markets = self.markets
return sorted(set([x['quote'] for _, x in markets.items()]))
def get_pair_quote_currency(self, pair: str) -> str:
"""
Return a pair's quote currency
"""
return self.markets.get(pair, {}).get('quote', '')
def get_pair_base_currency(self, pair: str) -> str:
"""
        Return a pair's base currency
"""
return self.markets.get(pair, {}).get('base', '')
def market_is_tradable(self, market: Dict[str, Any]) -> bool:
"""
Check if the market symbol is tradable by Freqtrade.
By default, checks if it's splittable by `/` and both sides correspond to base / quote
"""
symbol_parts = market['symbol'].split('/')
return (len(symbol_parts) == 2 and
len(symbol_parts[0]) > 0 and
len(symbol_parts[1]) > 0 and
symbol_parts[0] == market.get('base') and
symbol_parts[1] == market.get('quote')
)
def klines(self, pair_interval: Tuple[str, str], copy: bool = True) -> DataFrame:
if pair_interval in self._klines:
return self._klines[pair_interval].copy() if copy else self._klines[pair_interval]
else:
return | DataFrame() | pandas.DataFrame |
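# Illustrative sketch only (not part of the dataset row above): freqtrade's
# deep_merge_dicts() layers the user's ccxt_config / ccxt_async_config over the
# exchange defaults. A minimal stand-in merge could look like this; the real
# helper may differ in details.
def merge_dicts_sketch(overrides: dict, base: dict) -> dict:
    """Recursively copy `overrides` into `base`, returning `base`."""
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge_dicts_sketch(value, base[key])
        else:
            base[key] = value
    return base

# Example: user options win over defaults, nested keys are merged.
defaults = {'enableRateLimit': True, 'options': {'adjustForTimeDifference': True}}
user_cfg = {'options': {'defaultType': 'spot'}}
merged = merge_dicts_sketch(user_cfg, defaults)
# merged == {'enableRateLimit': True,
#            'options': {'adjustForTimeDifference': True, 'defaultType': 'spot'}}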
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import NamedTuple
import numpy as np
import pandas as pd
from .nmc import obtain_posterior
logger = logging.getLogger(__name__)
class ModelOutput(NamedTuple):
theta_samples: np.array
theta_means: np.array
theta_cis: np.array
psi_samples: np.array
psi_means: np.array
psi_cis: np.array
item_samples: np.array
item_means: np.array
item_cis: np.array
class LabelingErrorBMModel(object):
"""
Inference for CLARA's uniform model using an specialized and scalable
algorithm called nmc
"""
def __init__(
self, burn_in: int = 1000, num_samples: int = 1000, ci_coverage: float = 0.95
):
self.burn_in = burn_in
self.num_samples = num_samples
self.ub = 100 * (ci_coverage / 2.0 + 0.5)
self.lb = 100 - self.ub
# Compute the mean and 95% CI of theta given the samples collected
def _process_theta_output(self, thetas, n_item_groups, n_unique_ratings):
theta_means = np.empty((n_item_groups, n_unique_ratings))
theta_cis = np.empty((n_item_groups, n_unique_ratings, 2))
for ig in range(n_item_groups):
mean = np.mean(thetas[:, ig, :], axis=0)
lb, ub = np.percentile(thetas[:, ig, :], [self.lb, self.ub], axis=0)
theta_means[ig, :] = mean
theta_cis[ig, :, 0] = lb
theta_cis[ig, :, 1] = ub
return theta_means, theta_cis
def _process_clara_input_df_for_nmc(self, df: pd.DataFrame):
labels = df.ratings.values[0]
labelers = df.labelers.values[0]
num_labels = df.num_labels.values[0]
labeler_idx = category_idx = 0
rating_vocab, labelers_names = {}, {}
for e in labelers:
if e not in labelers_names.keys():
labelers_names[e] = labeler_idx
labeler_idx += 1
for e in labels:
if e not in rating_vocab.keys():
rating_vocab[e] = category_idx
category_idx += 1
concentration, expected_correctness = 10, 0.75
args_dict = {}
args_dict["k"] = labeler_idx
args_dict["num_samples_nmc"] = self.num_samples
args_dict["burn_in_nmc"] = self.burn_in
args_dict["model_args"] = (category_idx, 1, expected_correctness, concentration)
data_train = (labels, labelers, num_labels)
return (data_train, args_dict, num_labels, category_idx)
def _process_item_output(
self, prevalence_samples, labeler_confusion, num_categories, input_df
):
labels = input_df[0].loc[0]
labelers = input_df[1].loc[0]
num_labels = input_df[2].loc[0]
labelers_list, labels_list = [], []
pos = 0
for i in range(len(num_labels)):
item_labelers, item_labels = [], []
for _j in range(num_labels[i]):
item_labelers.append(labelers[pos])
item_labels.append(labels[pos])
pos += 1
labelers_list.append(item_labelers)
labels_list.append(item_labels)
n_items = len(num_labels)
n_unique_ratings = num_categories
item_means = np.empty((n_items, n_unique_ratings))
item_cis = np.empty((n_items, n_unique_ratings, 2))
n_samples = len(prevalence_samples)
# collect sampled probabilities for each item
item_samples = np.empty((n_samples, n_items, n_unique_ratings))
for i in range(n_items):
for s in range(n_samples):
item_samples[s, i] = prevalence_samples[s]
for k in range(n_unique_ratings):
for j in range(num_labels[i]):
item_rating = labels_list[i][j]
item_labeler = labelers_list[i][j]
item_samples[s, i, k] *= labeler_confusion[s][item_labeler][k][
item_rating
]
item_samples[s, i, :] /= sum(item_samples[s, i, :])
item_means[i] = np.mean(item_samples[:, i], axis=0)
lb, ub = np.percentile(item_samples[:, i], [self.lb, self.ub], axis=0)
item_cis[i, :, 0] = lb
item_cis[i, :, 1] = ub
return item_samples, item_means, item_cis
# Compute the mean and 95% CI of psi given the samples collected
def _process_psi_output(self, psis, n_labeler_groups, n_unique_ratings):
psi_means = np.empty((n_labeler_groups, n_unique_ratings, n_unique_ratings))
psi_cis = np.empty((n_labeler_groups, n_unique_ratings, n_unique_ratings, 2))
for lg in range(n_labeler_groups):
s = psis[:, lg]
mean = np.mean(s, axis=0)
lb, ub = np.percentile(s, [self.lb, self.ub], axis=0)
psi_means[lg] = mean
psi_cis[lg, :, :, 0] = lb
psi_cis[lg, :, :, 1] = ub
return psi_means, psi_cis
# Fit the model using exact inference
def fit(
self, df: pd.DataFrame, n_item_groups: int = None, n_labeler_groups: int = None
):
out = self._process_clara_input_df_for_nmc(df)
data_train, args_dict, num_labels, num_categories = out
items = []
items.append(data_train)
if n_item_groups is None:
n_item_groups = 1
if n_labeler_groups is None:
n_labeler_groups = args_dict["k"]
logger.info("Fitting using NMC ...")
# TO DO: incorporate n_item_groups and n_labeler_groups into nmc
# TO DO: extend nmc to support n_item_groups > 1 and
# n_labeler_groups != args_dict["k"]
samples, timing = obtain_posterior(data_train, args_dict)
logger.info(f"Fitting took {timing['inference_time']} sec")
samples_df = pd.DataFrame(samples)
# process output
logger.info("Processing outputs ...")
thetas = np.empty((self.num_samples, n_item_groups, num_categories))
psis = np.empty(
(self.num_samples, args_dict["k"], num_categories, num_categories)
)
for i in range(self.num_samples):
thetas[i] = samples_df["pi"].values[i]
psis[i] = samples_df["theta"].values[i]
theta_means, theta_cis = self._process_theta_output(
thetas, n_item_groups, num_categories
)
psi_means, psi_cis = self._process_psi_output(
psis, args_dict["k"], num_categories
)
item_samples, item_means, item_cis = self._process_item_output(
thetas, psis, num_categories, | pd.DataFrame(items) | pandas.DataFrame |
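# Illustrative sketch only: the _process_*_output helpers above reduce raw
# posterior samples to means and percentile credible intervals. The same idea
# in a few lines of NumPy (the 2.5/97.5 bounds mirror self.lb / self.ub):
import numpy as np

samples = np.random.dirichlet([5, 3, 2], size=1000)  # fake theta samples, shape (n_samples, n_ratings)
means = samples.mean(axis=0)
lower, upper = np.percentile(samples, [2.5, 97.5], axis=0)
# means[k] is the posterior mean of category k; (lower[k], upper[k]) is its 95% CI.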
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and | notna(result) | pandas.core.dtypes.missing.notna |
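# Illustrative sketch only: maybe_box_datetimelike() above wraps raw NumPy
# datetime/timedelta scalars in pandas' richer scalar types unless the target
# dtype is object. The observable behaviour from the public API:
import numpy as np
import pandas as pd

ts = pd.Timestamp(np.datetime64("2021-01-01"))   # numpy datetime64 -> Timestamp
td = pd.Timedelta(np.timedelta64(5, "m"))        # numpy timedelta64 -> Timedelta
# With dtype=object the helper leaves the scalar untouched.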
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Monday 3 December 2018
@author: <NAME>
"""
import os
import pandas as pd
import numpy as np
import feather
import time
from datetime import date
import sys
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import normalize
import somoclu
from delprocess.loadprofiles import resampleProfiles
from .metrics import mean_index_adequacy, davies_bouldin_score
from ..support import cluster_dir, results_dir
def progress(n, stats):
"""Report progress information, return a string."""
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
return print(s)
def clusterStats(cluster_stats, n, X, cluster_labels, preprocessing, transform, tic, toc):
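    """
    Compute evaluation metrics (silhouette, DBI, MIA and a combined score) for a
    run with n clusters, record fit time and cluster sizes, and return the
    updated cluster_stats dict.
    """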
stats = {'n_sample': 0,
'cluster_size': [],
'silhouette': 0.0,
'dbi': 0.0,
'mia': 0.0,
'all_scores': 0.0,
# 'cdi': 0.0,
't0': time.time(),
'batch_fit_time': 0.0,
'total_sample': 0}
cluster_stats[n] = stats
try:
cluster_stats[n]['total_sample'] += X.shape[0]
cluster_stats[n]['n_sample'] = X.shape[0]
cluster_stats[n]['silhouette'] = silhouette_score(X, cluster_labels, sample_size=10000)
cluster_stats[n]['dbi'] = davies_bouldin_score(X, cluster_labels)
cluster_stats[n]['mia'] = mean_index_adequacy(X, cluster_labels)
        #cluster_stats[n_clusters][y]['cdi'] = cluster_dispersion_index(Xbatch, cluster_labels) DON'T RUN LOCALLY!! - need to change to chunked algorithm once released
cluster_stats[n]['cluster_size'] = np.bincount(cluster_labels)
cluster_stats[n]['batch_fit_time'] = toc - tic
cluster_stats[n]['preprocessing'] = preprocessing
cluster_stats[n]['transform'] = transform
cluster_stats[n]['all_scores'] = cluster_stats[n]['dbi']*cluster_stats[n]['mia']/cluster_stats[n]['silhouette']
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
print(s)
except:
print('Could not compute clustering stats for n = ' + str(n))
pass
return cluster_stats
def saveResults(experiment_name, cluster_stats, cluster_centroids, som_dim, elec_bin, save=True):
"""
Saves cluster stats results and centroids for a single clustering iteration.
Called inside kmeans() and som() functions.
"""
for k, v in cluster_stats.items():
n = k
evals = pd.DataFrame(cluster_stats).T
evals['experiment_name'] = experiment_name
evals['som_dim'] = som_dim
evals['n_clust'] = n
evals['elec_bin'] = elec_bin
eval_results = evals.drop(labels='cluster_size', axis=1).reset_index(drop=True)
# eval_results.rename({'index':'k'}, axis=1, inplace=True)
eval_results[['dbi','mia','silhouette']] = eval_results[['dbi','mia','silhouette']].astype(float)
eval_results['date'] = date.today().isoformat()
# eval_results['best_clusters'] = None
centroid_results = pd.DataFrame(cluster_centroids)
centroid_results['experiment_name'] = experiment_name
centroid_results['som_dim'] = som_dim
centroid_results['n_clust'] = n
centroid_results['elec_bin'] = elec_bin
try:
centroid_results['cluster_size'] = evals['cluster_size'][n]
except:
centroid_results['cluster_size'] = np.nan
centroid_results.reset_index(inplace=True)
centroid_results.rename({'index':'k'}, axis=1, inplace=True)
centroid_results['date'] = date.today().isoformat()
#3 Save Results
if save is True:
os.makedirs(results_dir, exist_ok=True)
erpath = os.path.join(results_dir, 'cluster_results.csv')
if os.path.isfile(erpath):
eval_results.to_csv(erpath, mode='a', index=False, header=False)
else:
eval_results.to_csv(erpath, index=False)
os.makedirs(cluster_dir, exist_ok=True)
crpath = os.path.join(cluster_dir, experiment_name + '_centroids.csv')
if os.path.isfile(crpath):
centroid_results.to_csv(crpath, mode='a', index=False, header=False)
else:
centroid_results.to_csv(crpath, index=False)
print('Results saved for', experiment_name, str(som_dim), str(n))
return eval_results, centroid_results
def xBins(X, bin_type):
if bin_type == 'amd':
Xdd_A = X.sum(axis=1)
Xdd = Xdd_A*230/1000
XmonthlyPower = resampleProfiles(Xdd, interval='M', aggfunc='sum')
Xamd = resampleProfiles(XmonthlyPower, interval='A', aggfunc='mean').reset_index().groupby('ProfileID').mean()
Xamd.columns=['amd']
amd_bins = [0, 1, 50, 150, 400, 600, 1200, 2500, 4000]
bin_labels = ['{0:.0f}-{1:.0f}'.format(x,y) for x, y in zip(amd_bins[:-1], amd_bins[1:])]
Xamd['bins'] = pd.cut(Xamd.amd, amd_bins, labels=bin_labels, right=True, include_lowest=True)
Xbin_dict = dict()
for c in Xamd.bins.cat.categories:
Xbin_dict[c] = Xamd[Xamd.bins==c].index.values
del Xdd_A, Xdd, XmonthlyPower, Xamd
if bin_type == 'integral':
Xint = normalize(X).cumsum(axis=1)
Xintn = pd.DataFrame(Xint, index=X.index)
Xintn['max'] = X.max(axis=1)
clusterer = MiniBatchKMeans(n_clusters=8, random_state=10)
clusterer.fit(np.array(Xintn))
cluster_labels = clusterer.predict(np.array(Xintn))
labl = pd.DataFrame(cluster_labels, index=X.index)
Xbin_dict = dict()
for c in labl[0].unique():
Xbin_dict['bin'+str(c)] = labl[labl[0]==c].index.values
return Xbin_dict
def preprocessX(X, norm=None):
if norm == 'unit_norm': #Kwac et al 2013
Xnorm = normalize(X)
elif norm == 'zero-one': #Dent et al 2014
Xnorm = np.array(X.divide(X.max(axis=1), axis=0))
elif norm == 'demin': #Jin et al 2016
Xnorm = normalize(X.subtract(X.min(axis=1), axis=0))
elif norm == 'sa_norm': #Dekenah 2014
Xnorm = np.array(X.divide(X.mean(axis=1), axis=0))
else:
Xnorm = np.array(X)
#Xnorm.fillna(0, inplace=True)
Xnorm[np.isnan(Xnorm)] = 0
return Xnorm
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
This function applies the MiniBatchKmeans algorithm from sklearn on inputs X for range_n_clusters.
    If preprocessing is set, X is normalised with sklearn.preprocessing.normalize()
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
dim = 0 #set dim to 0 to match SOM formating
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
for n_clust in range_n_clusters:
clusterer = MiniBatchKMeans(n_clusters=n_clust, random_state=10)
#train clustering algorithm
tic = time.time()
clusterer.fit(A)
cluster_labels = clusterer.predict(A)
toc = time.time()
## Calculate scores
cluster_stats = clusterStats({}, n_clust, A, cluster_labels,
preprocessing = preprocessing, transform = None,
tic = tic, toc = toc)
cluster_centroids = clusterer.cluster_centers_
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n_clust] = cluster_labels
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
stats = pd.concat([stats, best_stats], axis=0)
stats.reset_index(drop=True, inplace=True)
if save is True:
saveLabels(cluster_lbls, stats)
return stats, centroids, cluster_lbls
def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):
"""
This function applies the self organising maps algorithm from somoclu on inputs X over square maps of range_n_dim.
    If preprocessing is set, X is normalised with sklearn.preprocessing.normalize()
If kmeans = True, the KMeans algorithm from sklearn is applied to the SOM and returns clusters
kwargs can be n_clusters = range(start, end, interval) OR list()
Returns cluster stats, cluster centroids and cluster labels.
"""
for dim in range_n_dim:
limit = int(np.sqrt(len(X)/20))
if dim > limit: #verify that number of nodes are sensible for size of input data
return print('Input size too small for map. Largest n should be ' + str(limit))
else:
pass
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'0-4000':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = | pd.DataFrame() | pandas.DataFrame |
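# Illustrative sketch only: the kmeans() routine above scores each candidate
# number of clusters with silhouette / DBI / MIA. The core fit-and-score loop
# boils down to something like this (synthetic data for demonstration):
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import silhouette_score

X_demo = np.random.rand(500, 24)  # e.g. 500 normalised daily load profiles
for k in range(2, 6):
    model = MiniBatchKMeans(n_clusters=k, random_state=10)
    labels = model.fit_predict(X_demo)
    print(k, silhouette_score(X_demo, labels))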
import pandas as pd
def fix_datasets():
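    """
    Merge the P.A. Trento / P.A. Bolzano rows of the regional dataset into a single
    'Trentino Alto Adige' region, join the result with regioni.csv and write the
    cleaned frame to dati.csv, then load the province dataset for its own fix-up.
    """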
dati = pd.read_csv("dati_regioni.csv")
regioni = pd.read_csv("regioni.csv")
## Devo mergiare i dati del trentino
dati.drop(columns = ["casi_da_sospetto_diagnostico", "casi_da_screening"], axis = 1, inplace = True)
df_r = dati.loc[(dati['denominazione_regione'] == "P.A. Bolzano") | (dati['denominazione_regione'] == "P.A. Trento")]
df_trentino = df_r.groupby("data").sum()
df_trentino['denominazione_regione'] = "Trentino Alto Adige"
df_trentino['lat'] = 46.068935
df_trentino['long'] = 11.121231
df_trentino = df_trentino.reset_index()
dati = dati.loc[(dati['denominazione_regione'] != "P.A. Trento") & (dati['denominazione_regione'] != "P.A. Bolzano")]
dati_fix = pd.concat([dati, df_trentino], sort=False)
dati_fix['stato'] = "ITA"
    dati_fix = dati_fix.drop(columns=["note"])
dati_correct = pd.merge(dati_fix, regioni, left_on = 'denominazione_regione', right_on = 'regione')
dati_correct = dati_correct.drop('denominazione_regione', axis = 1)
dati_correct.to_csv("dati.csv")
print("Dataset Regioni fixed")
# ***
# Province
dati_p = | pd.read_csv("dati_province.csv") | pandas.read_csv |
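# Illustrative sketch only: the Trentino fix above collapses the two autonomous
# provinces into one region by summing their daily counts. The same pattern on
# a toy frame:
import pandas as pd

toy = pd.DataFrame({
    "data": ["2020-03-01", "2020-03-01"],
    "denominazione_regione": ["P.A. Bolzano", "P.A. Trento"],
    "totale_casi": [10, 15],
})
merged = toy.groupby("data", as_index=False).sum(numeric_only=True)
merged["denominazione_regione"] = "Trentino Alto Adige"
# -> one row per date with totale_casi = 25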
import csv
import logging
import os
import re
import time
import traceback
import pandas as pd
import requests
import xlrd
import settings
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
class ExcelConverter(object):
"""
Conversion of multiple excel sheets to csv files
Adapted from http://strife.pl/2014/12/converting-large-xls-xlsx-files-to-csv-using-python/
"""
def __init__(self):
pass
@staticmethod
def excel_to_csv(wb=None, xls_file=settings.MAIN_FILE, target_folder=settings.PROCESSED_FOLDER):
"""
Convert an excel file(.xls/.xslx) to CSV writing all sheets to one file
:param xls_file: Original excel file to be converted
:param target_folder: Folder in which the generated file will be stored
:param wb: Loaded workbook
"""
print("Start converting")
if wb:
print("Workbook provided")
else:
print("No workbook provided")
wb = xlrd.open_workbook(xls_file)
base = os.path.basename(xls_file)
target = target_folder + os.path.splitext(base)[0] + '.csv'
csv_file = open(target, 'w+')
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
first_sheet = True
count = 1
for sheet_name in wb.sheet_names():
try:
sh = wb.sheet_by_name(sheet_name)
if sheet_name == 'Kyegegwa': # Kyegegwa has completely different data
print("\nMoving on, Kyegegwa has different data")
continue
if first_sheet:
range_start = 0
else:
range_start = 1
for row in range(range_start, sh.nrows):
row_values = sh.row_values(row)
new_values = []
for s in row_values:
str_value = (str(s).strip())
                        is_int = bool(re.match(r"^([0-9]+)\.0$", str_value))
if is_int:
str_value = int(float(str_value))
else:
                            is_float = bool(re.match(r"^([0-9]+)\.([0-9]+)$", str_value))
                            is_long = bool(re.match(r"^([0-9]+)\.([0-9]+)e\+([0-9]+)$", str_value))
if is_float:
str_value = float(str_value)
if is_long:
str_value = int(float(str_value))
new_values.append(str_value)
wr.writerow(new_values)
print('.', end='', flush=True)
count += 1
except Exception as e:
logging.error(str(e) + " " + traceback.format_exc())
first_sheet = False
csv_file.close()
print("Done Converting {} files".format(count))
@staticmethod
def excel_to_csv_multiple(xls_file, target_folder, wb=None):
"""
Convert an excel file(.xls/.xslx) to CSV writing each sheet to a separate file
:param xls_file: Original excel file to be converted
:param target_folder: Folder in which the generated files will be stored
"""
print("Start converting")
if not wb:
wb = xlrd.open_workbook(xls_file)
count = 1
for sheet_name in wb.sheet_names():
try:
target = target_folder + sheet_name.upper() + '.csv'
sh = wb.sheet_by_name(sheet_name)
if sheet_name == 'Kyegegwa': # Kyegegwa has completely different data
print("\nMoving on, Kyegegwa has different data")
continue
csv_file = open(target, 'w')
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
for row in range(sh.nrows):
row_values = sh.row_values(row)
new_values = []
for s in row_values:
str_value = (str(s))
                        is_int = bool(re.match(r"^([0-9]+)\.0$", str_value))
if is_int:
str_value = int(float(str_value))
else:
                            is_float = bool(re.match(r"^([0-9]+)\.([0-9]+)$", str_value))
                            is_long = bool(re.match(r"^([0-9]+)\.([0-9]+)e\+([0-9]+)$", str_value))
if is_float:
str_value = float(str_value)
if is_long:
str_value = int(float(str_value))
new_values.append(str_value)
wr.writerow(new_values)
csv_file.close()
print('.', end='', flush=True)
count += 1
except Exception as e:
logging.error(str(e) + " " + traceback.format_exc())
print("\nDone Converting {} files".format(count))
class PLEInfo(object):
@classmethod
def get_rows_columns(cls, file=settings.MAIN_FILE):
"""
Method prints how many columns and rows each sheet has
:param file: File which has the data
:return None:
"""
cls.get_columns(file)
cls.get_rows(file)
# work_book = xlrd.open_workbook(file)
# sheet_names = work_book.sheet_names()
# print("District | Rows | Columns ")
#
# for sheet_name in sheet_names:
# sheet = work_book.sheet_by_name(sheet_name)
# print("{0} | {1} | {2} ".format(sheet.name, sheet.ncols, sheet.nrows,))
@staticmethod
def get_columns(file):
"""
Method prints how many columns each sheet has
:param file: File which has the data
:return None:
"""
work_book = xlrd.open_workbook(file)
sheet_names = work_book.sheet_names()
print("District | Columns")
for sheet_name in sheet_names:
sheet = work_book.sheet_by_name(sheet_name)
print("{0} | {1}".format(sheet.name, sheet.ncols, ))
@staticmethod
def get_rows(file):
"""
Method prints how many rows each sheet has
:param file: File which has the data
:return None:
"""
work_book = xlrd.open_workbook(file)
sheet_names = work_book.sheet_names()
print("District | Rows")
for sheet_name in sheet_names:
sheet = work_book.sheet_by_name(sheet_name)
print("{0} | {1}".format(sheet.name, sheet.nrows, ))
def find_csv_shape(folder):
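    """
    Walk `folder`, read every CSV and report the distribution of column counts
    across files (useful for spotting districts converted with a different layout).
    """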
d = {}
for path, folders, files in os.walk(folder):
for file in files:
f = os.path.join(path, file)
csv = pd.read_csv(f)
if len(csv.columns) in d:
d[len(csv.columns)] += 1
else:
d[len(csv.columns)] = 1
print("{0}\n{1} - {2}".format(len(csv.columns), file, csv.columns))
total_districts = sum(d.values())
print("Sheets with column length {}".format(d))
for key, value in d.items():
percentage = (value / total_districts) * 100
print("{} - {:.2f}%".format(key, percentage))
def remove_unnamed(folder, right_size):
for path, folders, files in os.walk(folder):
for file in files:
f = os.path.join(path, file)
old_csv = | pd.read_csv(f) | pandas.read_csv |
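# Illustrative sketch only: an alternative to the xlrd-based converter above is
# pandas.read_excel with sheet_name=None, which returns a {sheet_name: DataFrame}
# dict that can be written straight to per-sheet CSV files:
import os
import pandas as pd

def sheets_to_csv_sketch(xls_file: str, target_folder: str) -> None:
    sheets = pd.read_excel(xls_file, sheet_name=None)  # load all sheets at once
    for name, frame in sheets.items():
        if name == "Kyegegwa":  # this sheet holds different data, as noted above
            continue
        frame.to_csv(os.path.join(target_folder, name.upper() + ".csv"), index=False)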
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
                              'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name='TOTAL')
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['TOTAL', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('TOTAL', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['TOTAL']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
exp_rows.name = 'TOTAL'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
for margins_name in [666, None, ['a', 'b']]:
with pytest.raises(ValueError):
crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name=margins_name)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')],
names=['b', 'c'])
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
        # GH 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=true and dropna=true
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, np.nan, 2],
'b': [3, 3, 4, 4, 4, 4]})
actual = | pd.crosstab(df.a, df.b, margins=True, dropna=True) | pandas.crosstab |
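The completion above maps to pandas.crosstab. As a brief illustration, the following sketch runs that call on the same toy frame used in test_margin_dropna above; the frame is taken from the test, and the print is only illustrative.

import numpy as np
import pandas as pd

# Same frame as in test_margin_dropna: one observation has a missing 'a'.
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
                   'b': [3, 3, 4, 4, 4, 4]})

# The observation with NaN in 'a' is excluded from the cell counts and from
# the 'All' margins, matching the expected frame [[1, 0, 1], [1, 3, 4], [2, 3, 5]]
# asserted in the test.
table = pd.crosstab(df.a, df.b, margins=True, dropna=True)
print(table)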
#For the computation of average temperatures using GHCN data
import ulmo, pandas as pd, matplotlib.pyplot as plt, numpy as np, csv, pickle
#Grab weather stations that meet criteria (from previous work) and assign lists
st = ulmo.ncdc.ghcn_daily.get_stations(country ='US',elements=['TMAX'],end_year=1950, as_dataframe = True)
ids = pd.read_csv('IDfile.csv')
idnames = [ids.id[x] for x in range(0, len(ids))]
Longitude= []
Latitude = []
ID = []
Elev = []
Name = []
Tmaxavgresult = []
Tminavgresult = []
Tmaxavg = []
Tminavg = []
Tmaxstd = []
Tminstd = []
Tmaxz = []
Tminz = []
Tmaxanom = []
Tminanom = []
maxset = []
minset = []
# Begin loop to isolate values of interest.
# Test loop (random subset of stations) kept for reference:
# rando = np.random.random_integers(len(idnames), size=(10,))
# for x in rando:
for q in range(0,len(ids)-1):
# Grab data and transform to K
a = st.id[idnames[q]]
data = ulmo.ncdc.ghcn_daily.get_data(a, as_dataframe=True)
tmax = data['TMAX'].copy()
tmin = data['TMIN'].copy()
tmax.value = (tmax.value/10.0) + 273.15
tmin.value = (tmin.value/10.0) + 273.15
# Name and save parameters
b = st.longitude[idnames[q]]
c = st.latitude[idnames[q]]
d = st.elevation[idnames[q]]
e = st.name[idnames[q]]
    # Average the data on a monthly basis and generate x and y coordinates for the years in which El Nino data exists (there is probably a more efficient way to do this; see the resample sketch after this script).
filedatasmax = []
filedatasmin = []
for y in range(1951,2016):
filedatamax = {}
filedatamin = {}
datejan = ''.join([str(y),"-01"])
datefeb = ''.join([str(y),"-02"])
datemar = ''.join([str(y),"-03"])
dateapr = ''.join([str(y),"-04"])
datemay = ''.join([str(y),"-05"])
datejun = ''.join([str(y),"-06"])
datejul = ''.join([str(y),"-07"])
dateaug = ''.join([str(y),"-08"])
datesep = ''.join([str(y),"-09"])
dateoct = ''.join([str(y),"-10"])
datenov = ''.join([str(y),"-11"])
datedec = ''.join([str(y),"-12"])
nanmaxjan = tmax['value'][datejan]
nanminjan = tmin['value'][datejan]
nanmaxfeb = tmax['value'][datefeb]
nanminfeb = tmin['value'][datefeb]
nanmaxmar = tmax['value'][datemar]
nanminmar = tmin['value'][datemar]
nanmaxapr = tmax['value'][dateapr]
nanminapr = tmin['value'][dateapr]
nanmaxmay = tmax['value'][datemay]
nanminmay = tmin['value'][datemay]
nanmaxjun = tmax['value'][datejun]
nanminjun = tmin['value'][datejun]
nanmaxjul = tmax['value'][datejul]
nanminjul = tmin['value'][datejul]
nanmaxaug = tmax['value'][dateaug]
nanminaug = tmin['value'][dateaug]
nanmaxsep = tmax['value'][datesep]
nanminsep = tmin['value'][datesep]
nanmaxoct = tmax['value'][dateoct]
nanminoct = tmin['value'][dateoct]
nanmaxnov = tmax['value'][datenov]
nanminnov = tmin['value'][datenov]
nanmaxdec = tmax['value'][datedec]
nanmindec = tmin['value'][datedec]
        # Assemble the yearly row: the year followed by the twelve monthly means (NaNs excluded).
filedatamax = [y, nanmaxjan[~pd.isnull(nanmaxjan)].mean(), nanmaxfeb[~pd.isnull(nanmaxfeb)].mean(),nanmaxmar[~pd.isnull(nanmaxmar)].mean(),nanmaxapr[~pd.isnull(nanmaxapr)].mean(),nanmaxmay[~pd.isnull(nanmaxmay)].mean(),nanmaxjun[~pd.isnull(nanmaxjun)].mean(),nanmaxjul[~pd.isnull(nanmaxjul)].mean(),nanmaxaug[~pd.isnull(nanmaxaug)].mean(),nanmaxsep[~pd.isnull(nanmaxsep)].mean(),nanmaxoct[~pd.isnull(nanmaxoct)].mean(),nanmaxnov[~pd.isnull(nanmaxnov)].mean(),nanmaxdec[~pd.isnull(nanmaxdec)].mean() ]
filedatamin = [y, nanminjan[~ | pd.isnull(nanminjan) | pandas.isnull |
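As the inline comment in the script above concedes, the month-by-month slicing can be collapsed into a single resample. The sketch below is a hedged alternative, not the original author's code: it uses a synthetic daily series as a stand-in for tmax['value'] (the real ulmo output is not reproduced here) and assumes a daily DatetimeIndex.

import numpy as np
import pandas as pd

# Synthetic stand-in for tmax['value']: daily temperatures in K with gaps.
idx = pd.date_range('1951-01-01', '2015-12-31', freq='D')
values = pd.Series(290 + 10 * np.sin(2 * np.pi * idx.dayofyear / 365.25), index=idx)
values.iloc[::50] = np.nan                      # sprinkle in missing observations

monthly = values.resample('M').mean()           # NaNs are skipped; one value per month
by_year = (monthly
           .groupby([monthly.index.year, monthly.index.month])
           .mean()
           .unstack())
# by_year.loc[1951] holds the twelve monthly means for 1951 -- the same numbers
# the hand-written filedatamax row collects one month at a time.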
import pandas as pd
import numpy as np
def frequency_encoding(df,feature):
map_dict=df[feature].value_counts().to_dict()
df[feature]=df[feature].map(map_dict)
def target_guided_encoding(df,feature,target):
order=df.groupby([feature])[target].mean().sort_values().index
map_dic={k:i for i,k in enumerate(order,0)}
df[feature]=df[feature].map(map_dic)
def mean_encoding(df,feature,target):
map_dict=df.groupby([feature])[target].mean().to_dict()
df[feature]=df[feature].map(map_dict)
def probability_ratio_encoding(df,feature,target):
order=df.groupby([feature])[target].mean()
prob_df=pd.DataFrame(order)
prob_df['temp']=1-prob_df[target]
prob_df['encoding']=prob_df[target]/prob_df['temp']
map_dict=prob_df['encoding'].to_dict()
df[feature]=df[feature].map(map_dict)
def one_hot(df,feature):
dummies=pd.get_dummies(df[feature],drop_first=True)
df= | pd.concat([df,dummies],axis=1) | pandas.concat |
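A short usage sketch for the helpers above; it assumes the functions defined in this script are in scope, and the column names ('city', 'target') and values are purely illustrative. Note that the first four helpers mutate the passed frame in place rather than returning a new one.

import pandas as pd

df = pd.DataFrame({'city': ['NY', 'NY', 'LA', 'SF', 'LA', 'NY'],
                   'target': [1, 0, 1, 0, 1, 1]})
frequency_encoding(df, 'city')          # each city replaced by its count: [3, 3, 2, 1, 2, 3]

df2 = pd.DataFrame({'city': ['NY', 'NY', 'LA', 'SF', 'LA', 'NY'],
                    'target': [1, 0, 1, 0, 1, 1]})
mean_encoding(df2, 'city', 'target')    # each city replaced by its mean target (~0.67, 1.0, 0.0)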