| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
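Each row below pairs a long code prompt (with one call left blank at the point marked `| completion | api |`) with the expected completion expression and the fully qualified pandas api it exercises. As a purely hypothetical illustration of consuming such rows, assuming they have been exported to a JSON-lines file named rows.jsonl (the export step and the filename are assumptions, not part of this page):

import pandas as pd

# Load the three string columns (prompt, completion, api) from a hypothetical JSONL export.
rows = pd.read_json("rows.jsonl", lines=True)

first = rows.iloc[0]
print(first["api"])           # e.g. "pandas.DataFrame"
print(first["completion"])    # e.g. "pd.DataFrame(columns=col)"
print(first["prompt"][:200])  # start of the surrounding code context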
import os, datetime, pymongo, configparser
import pandas as pd
from bson import json_util
global_config = None
global_client = None
global_stocklist = None
def getConfig(root_path):
global global_config
if global_config is None:
#print("initial Config...")
global_config = configparser.ConfigParser()
global_config.read(root_path + "/" + "config.ini")
return global_config
def getClient():
global global_client
from pymongo import MongoClient
if global_client is None:
#print("initial DB Client...")
global_client = MongoClient('localhost', 27017)
return global_client
def getCollection(database, collection):
client = getClient()
db = client[database]
return db[collection]
def getStockList(root_path, database, sheet):
global global_stocklist
if global_stocklist is None:
#print("initial Stock List...")
global_stocklist = queryStockList(root_path, database, sheet)
return global_stocklist
def setStockList(df):
global global_stocklist
df.set_index('symbol', inplace=True)
global_stocklist = df
return global_stocklist
def readFromCollection(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find(queryString)
df = pd.DataFrame(list(result))
if df.empty == False: del df['_id']
return df
def writeToCollection(collection, df, id = None):
jsonStrings = df.to_json(orient='records')
bsonStrings = json_util.loads(jsonStrings)
for string in bsonStrings:
if id is not None:
id_string = ''.join([string[item] for item in id])
string['_id'] = id_string
collection.save(string)
def readFromCollectionExtend(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find_one(queryString)
if result is None:
return pd.DataFrame(), {}
return pd.read_json(result['data'], orient='records'), result['metadata']
def writeToCollectionExtend(collection, symbol, df, metadata=None):
jsonStrings = {"_id":symbol, "symbol":symbol, "data":df.to_json(orient='records'), "metadata":metadata}
#bsonStrings = json_util.loads(jsonStrings)
collection.save(jsonStrings)
def writeToCSV(csv_dir, CollectionKey, df):
if os.path.exists(csv_dir) == False:
os.makedirs(csv_dir)
filename = csv_dir + CollectionKey + '.csv'
df.to_csv(filename)
def queryStockList(root_path, database, sheet):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False: df = setStockList(df)
return df
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=0)
if df.empty == False: df = setStockList(df)
return df
return pd.DataFrame()
except Exception as e:
print("queryStockList Exception", e)
return pd.DataFrame()
return pd.DataFrame()
def storeStockList(root_path, database, sheet, df, symbol = None):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
if symbol is not None:
df = df[df.index == symbol].reset_index()
writeToCollection(collection, df, ['symbol'])
# try:
# index_info = collection.index_information()
# print("index info", index_info)
# except Exception as e:
# print(e)
# writeToCollection(collection, df)
# #collection.create_index('symbol', unique=True, drop_dups=True)
# else:
# writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storeStockList Exception", e)
def queryStockPublishDay(root_path, database, sheet, symbol):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename) == False: return ''
df = pd.read_csv(filename, index_col=["index"])
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
except Exception as e:
print("queryStockPublishDay Exception", e)
return ''
return ''
def storePublishDay(root_path, database, sheet, symbol, date):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=["index"])
publishDate = df[df['symbol'] == symbol]
if publishDate.empty:
df.loc[len(df)] = [symbol, date]
else:
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storePublishDay Exception", e)
def queryStock(root_path, database, sheet_1, sheet_2, symbol, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
stockList = getStockList(root_path, database, sheet_1)
lastUpdateTime = pd.Timestamp(stockList.loc[symbol][update_key])
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
df.set_index('date', inplace=True)
if 'index' in df:
del df['index']
return df, lastUpdateTime
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet_1)
filename = csv_dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename, index_col=["date"])
return df, lastUpdateTime
except Exception as e:
print("queryStock Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeStock(root_path, database, sheet_1, sheet_2, symbol, df, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, sheet_1)
if (stockList[stockList.index == symbol][update_key][0] != now_date):
stockList.set_value(symbol, update_key, now_date)
storeStockList(root_path, database, sheet_1, stockList, symbol)
# df.set_index('date')
# df.index = df.index.astype(str)
# df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = df.reset_index()
if 'date' in df: df.date = df.date.astype(str)
writeToCollectionExtend(collection, symbol, df, {})
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet_1)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeStock Exception", e)
def queryNews(root_path, database, sheet, symbol):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp(getStockList(root_path, database, 'SHEET_US_DAILY').loc[symbol]['news_update'])
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df = readFromCollection(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
#df.set_index('date', inplace=True)
return df, lastUpdateTime
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename)
return df, lastUpdateTime
except Exception as e:
print("queryNews Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeNews(root_path, database, sheet, symbol, df):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, 'SHEET_US_DAILY')
stockList.set_value(symbol, 'news_update', now_date)
storeStockList(root_path, database, "SHEET_US_DAILY", stockList.reset_index())
df = df.drop_duplicates(subset=['uri'], keep='first')
#df.set_index(['date'], inplace=True)
#df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, sheet)
#df = df.reset_index()
df['symbol'] = symbol
writeToCollection(collection, df, ['symbol', 'uri'])
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeNews Exception", e)
def queryEarnings(root_path, database, sheet, date):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : date }
df, metadata = readFromCollectionExtend(collection, queryString)
return df
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + date + ".csv"
if os.path.exists(filename): return pd.read_csv(filename)
return pd.DataFrame()
except Exception as e:
print("queryEarnings Exception", e)
return pd.DataFrame()
return pd.DataFrame()
def storeEarnings(root_path, database, sheet, date, df):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
try:
if storeType == 1:
collection = getCollection(database, sheet)
writeToCollectionExtend(collection, date, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, date, df)
except Exception as e:
print("storeEarnings Exception", e)
def queryTweets(root_path, database, sheet, symbol, col):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp('1970-01-01')
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if 'last_update' in metadata:
lastUpdateTime = pd.Timestamp(metadata['last_update'])
if df.empty: return | pd.DataFrame(columns=col) | pandas.DataFrame |
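The storage helpers in the row above branch on StoreType (1 = MongoDB, 2 = CSV files) and build directories from a Paths section, both read from config.ini. A minimal hypothetical sketch of a compatible configuration follows; only the section and key names 'Setting', 'StoreType', 'Paths' and 'CSV_SHARE' come from the code above, while 'DB_STOCK' stands in for whatever database name the caller passes and every directory value is invented for illustration:

import configparser

# Hypothetical config.ini layout matching the config.get() calls above.
config = configparser.ConfigParser()
config['Setting'] = {'StoreType': '2'}      # 1 = MongoDB, 2 = CSV files
config['Paths'] = {
    'DB_STOCK': 'data/stock/',              # looked up as config.get('Paths', database)
    'SHEET_US_DAILY': 'us_daily/',          # looked up as config.get('Paths', sheet)
    'CSV_SHARE': 'share/',                  # shared CSV subdirectory used by the list/IPO helpers
}
# configparser lowercases option names by default; config.get() lowercases its
# lookups too, so the keys above still resolve after a round trip.
with open('config.ini', 'w') as f:
    config.write(f)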
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.parametrize("method", ["cummin", "cummax"])
def test_cummin_cummax(self, datetime_series, method):
ufunc = methods[method]
result = getattr(datetime_series, method)().values
expected = ufunc(np.array(datetime_series))
tm.assert_numpy_array_equal(result, expected)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = getattr(ts, method)()[1::2]
expected = ufunc(ts.dropna())
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ts",
[
pd.Timedelta(0),
pd.Timestamp("1999-12-31"),
pd.Timestamp("1999-12-31").tz_localize("US/Pacific"),
],
)
def test_cummin_cummax_datetimelike(self, ts):
# with ts==pd.Timedelta(0), we are testing td64; with naive Timestamp
# we are testing datetime64[ns]; with Timestamp[US/Pacific]
# we are testing dt64tz
tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "3 days"])
ser = pd.Series(tdi + ts)
exp_tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "2 days", "NaT", "3 days"])
expected = pd.Series(exp_tdi + ts)
result = ser.cummax(skipna=True)
tm.assert_series_equal(expected, result)
exp_tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "1 days"])
expected = pd.Series(exp_tdi + ts)
result = ser.cummin(skipna=True)
tm.assert_series_equal(expected, result)
exp_tdi = pd.to_timedelta(
["NaT", "2 days", "2 days", "2 days", "2 days", "3 days"]
)
expected = pd.Series(exp_tdi + ts)
result = ser.cummax(skipna=False)
tm.assert_series_equal(expected, result)
exp_tdi = pd.to_timedelta(
["NaT", "2 days", "2 days", "1 days", "1 days", "1 days"]
)
expected = pd.Series(exp_tdi + ts)
result = ser.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
# checking Series method vs the ufunc applied to the values
a = pd.Series([False, False, False, True, True, False, False])
c = pd.Series([False] * len(a))
for method in methods:
for ser in [a, ~a, c, ~c]:
ufunc = methods[method]
exp_vals = ufunc(ser.values)
expected = pd.Series(exp_vals)
result = getattr(ser, method)()
tm.assert_series_equal(result, expected)
def test_cummethods_bool_in_object_dtype(self):
ser = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
res = getattr(ser, method)()
| tm.assert_series_equal(res, expecteds[method]) | pandas._testing.assert_series_equal |
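For quick reference, the numpy/pandas equivalence encoded by the methods mapping above can be checked directly; this is a small standalone illustration, not part of the test module:

import numpy as np
import pandas as pd

ser = pd.Series([3.0, 1.0, 4.0, 1.0, 5.0])

# cummax mirrors np.maximum.accumulate and cumsum mirrors np.cumsum,
# which is exactly what _check_accum_op and test_cummin_cummax assert above.
assert (ser.cummax().values == np.maximum.accumulate(ser.values)).all()
assert (ser.cumsum().values == np.cumsum(ser.values)).all()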
"""
Test cases for the regi0.taxonomic._helpers.expand_result function.
"""
import pandas as pd
import pytest
from regi0.taxonomic._helpers import expand_result
@pytest.fixture()
def names():
return pd.Series(
[
None,
"Tremarctos ornatus",
"Panthera onca",
None,
"Panthera onca",
"Tremarctos ornatus",
]
)
@pytest.fixture()
def df():
return pd.DataFrame(
{
"kingdom": ["Animalia", "Animalia"],
"phylum": ["Chordata", "Chordata"],
"class": ["Mammalia", "Mammalia"],
"order": ["Carnivora", "Carnivora"],
"family": ["Ursidae", "Felidae"],
"genus": ["Tremarctos", "Panthera"],
"species": ["Tremarctos ornatus", "Panthera onca"],
}
)
@pytest.fixture()
def result(names, df):
return expand_result(df, names)
def test_order(result, names):
| pd.testing.assert_series_equal(result["species"], names, check_names=False) | pandas.testing.assert_series_equal |
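The fixtures above imply that expand_result takes a lookup table of taxonomy records plus a Series of possibly repeated or missing names and returns one row per name, aligned with that Series. The following is only a sketch of behaviour inferred from these fixtures, not the actual regi0 implementation:

import pandas as pd

def expand_result_sketch(df: pd.DataFrame, names: pd.Series) -> pd.DataFrame:
    # One output row per entry in `names`; None or unmatched names become all-NaN rows.
    expanded = df.set_index("species").reindex(names)
    expanded["species"] = expanded.index
    return expanded.reset_index(drop=True)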
import datetime
import pandas as pd
class solar_data:
# def __init__(self):
# '''
# 'data' is a dataframe
# '''
# self.data_df = data
def ingest_pvwatts(self, data):
'''
data must be a dataframe object.
This function adds a datetime column
'''
self.data_df = data
self.data_df["datetime"] = self.data_df.apply( lambda row:
datetime.datetime( year=2016, month=int(row["Month"]), day=int(row["Day"]),
hour=int(row["Hour"]) ), axis=1 )
def ingest_pvwatts_csv(self, filename):
df = pd.read_csv(filename,thousands=',')
self.ingest_pvwatts(df)
def ingest_daily_production_enlightenmanager_csv(self, filename):
df = pd.read_csv(filename,thousands=',', parse_dates=[0])
df.columns = ['datetime','Wh']
self.data_df = df
##
## Helpful Notes:
#df['Date/Time'] = pva_df['Date/Time'].apply(dateutil.parser.parse)
#df['Date/Time'] = pd.to_datetime(pva_df['Date/Time'])
def export_kwatts_for_elect_rates(self):
df = pd.DataFrame()
df["datetime"] = self.data_df["datetime"]
df["Value"] = self.data_df["AC System Output (W)"] /1000.0
return df
def export_daily_energy_from_pvwatts(self):
df = pd.DataFrame()
df["datetime"] = self.data_df["datetime"]
df["Wh"] = self.data_df["AC System Output (W)"]
df.index = df.datetime
df = df.resample("D", how='sum')
return df
## TODO: use date_range() and DatetimeIndex
def export_daily_energy_from_enlightenmanager(self):
df = | pd.DataFrame() | pandas.DataFrame |
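A hypothetical usage sketch for the class above, assuming a PVWatts hourly CSV that provides the Month, Day, Hour and 'AC System Output (W)' columns referenced in the code (the filename is an assumption):

# Hypothetical usage of the solar_data class defined above.
solar = solar_data()
solar.ingest_pvwatts_csv("pvwatts_hourly.csv")   # assumed filename
kw = solar.export_kwatts_for_elect_rates()       # datetime plus kW values
print(kw.head())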
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_odered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = Series({"a": 1, "b": 2}, name=s1name)
s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged():
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = DataFrame(
{
"A": Series(["a", "b", "c", np.nan], dtype="category"),
"B": Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
def test_concat_empty_df_object_dtype():
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = pd.concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_sparse():
# GH 23557
a = Series(SparseArray([0, 1, 2]))
expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_dense_sparse():
# GH 30668
a = Series(pd.arrays.SparseArray([1, None]), dtype=float)
b = Series([1], dtype=float)
expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(
pd.SparseDtype(np.float64, None)
)
result = pd.concat([a, b], axis=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
def test_concat_copy_index(test_series, axis):
# GH 29879
if test_series:
ser = Series([1, 2])
comb = concat([ser, ser], axis=axis, copy=True)
assert comb.index is not ser.index
else:
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
comb = concat([df, df], axis=axis, copy=True)
assert comb.index is not df.index
assert comb.columns is not df.columns
@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
def test_duplicate_keys(keys):
# GH 33654
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
s1 = Series([7, 8, 9], name="c")
s2 = Series([10, 11, 12], name="d")
result = concat([df, s1, s2], axis=1, keys=keys)
expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
expected_columns = pd.MultiIndex.from_tuples(
[(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
)
expected = DataFrame(expected_values, columns=expected_columns)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"obj",
[
tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
tm.SubclassedSeries(np.arange(0, 10), name="A"),
],
)
def test_concat_preserves_subclass(obj):
# GH28330 -- preserve subclass
result = concat([obj, obj])
assert isinstance(result, type(obj))
def test_concat_frame_axis0_extension_dtypes():
# preserve extension dtype (through common_dtype mechanism)
df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")})
df2 = DataFrame({"a": np.array([4, 5, 6])})
result = pd.concat([df1, df2], ignore_index=True)
expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
result = pd.concat([df2, df1], ignore_index=True)
expected = DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
def test_concat_preserves_extension_int64_dtype():
# GH 24768
df_a = DataFrame({"a": [-1]}, dtype="Int64")
df_b = DataFrame({"b": [1]}, dtype="Int64")
result = pd.concat([df_a, df_b], ignore_index=True)
    expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
import argparse
import sys
from pathlib import Path
import cli
def predict(args):
import pandas as pd
import system.datasets as datasets
import system.inference as inference
# Load the relevant dataset metadata
dataset = datasets.dataset(args.dataset, args.arca23k_dir,
args.fsd50k_dir, args.params.seed)
subset = dataset[f'{args.subset}']
# Generate predictions using several model checkpoints
checkpoint_dir = args.work_dir / args.experiment_id / 'checkpoints'
epochs = select_epochs(args.work_dir / args.experiment_id / 'logs')
y_pred = inference.predict(subset, epochs, checkpoint_dir,
**vars(args.params))
# Ensure output directory exists
pred_dir = args.work_dir / args.experiment_id / 'predictions'
pred_dir.mkdir(parents=True, exist_ok=True)
# Save predictions to disk
index = subset.tags.index
if args.output_name:
output_path = pred_dir / args.output_name
else:
output_path = pred_dir / f'{args.dataset.lower()}_{args.subset}.csv'
df = pd.DataFrame(y_pred, index, dataset.label_set)
df.to_csv(output_path)
print(f'\nPredictions written to {output_path}')
# Delete unused checkpoint files if applicable
if args.clean:
count = delete_checkpoints(checkpoint_dir, epochs)
print(f'\nRemoved {count} unused checkpoint files')
def select_epochs(log_dir, metric='val_mAP', n_epochs=3, min_epoch=10):
import pandas as pd
    df = pd.read_csv(log_dir / 'history.csv', index_col=0)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import os
import argparse
from pathlib import Path
import joblib
import scipy.sparse
import string
import nltk
from nltk import word_tokenize
nltk.download('punkt')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
'''
Preprocessing and preparation of data:
The purpose of this script is to prepare and preprocess the raw textual data and the admission data needed for training and testing the classification model. This process includes the following steps:
1. Clean and prepare admission data
2. Extract discharge summaries from note data
3. Remove newborn cases and in-hospital deaths
4. Bind note-data to 30-day readmission information
5. Split into train, validation and test set and balance training data by oversampling positive cases
6. Removal of special characters, numbers and de-identified brackets
7. Vectorise all discharge notes:
7a. Remove stop-words, most common words and very rare words (benchmarks need to be defined)
7b. Create set of TF-IDF weighted tokenised discharge notes
8. Output datasets and labels as CSV-files
'''
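# A minimal sketch of step 7 above, assuming the discharge notes arrive as an iterable of
# strings. It is illustrative only and is not called by the pipeline below; the min_df and
# max_df thresholds stand in for the yet-to-be-defined benchmarks and would need tuning.
def example_tfidf_vectorise(discharge_notes, min_df=5, max_df=0.8):
    # Tokenise with NLTK and weight tokens by TF-IDF, dropping very rare and very common terms
    vectoriser = TfidfVectorizer(tokenizer=word_tokenize,
                                 stop_words='english',
                                 min_df=min_df,
                                 max_df=max_df)
    weighted_notes = vectoriser.fit_transform(discharge_notes)
    return weighted_notes, vectoriser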
# Defining main function
def main(args):
notes_file = args.nf
admissions_file = args.af
NotePreprocessing(notes_file = notes_file, admissions_file = admissions_file)
# Defining class 'NotePreprocessing'
class NotePreprocessing:
def __init__(self, notes_file, admissions_file):
# Setting directory of input data
data_dir = self.setting_data_directory()
# Setting directory of output plots
out_dir = self.setting_output_directory()
# Loading notes
if notes_file is None:
notes = pd.read_csv(data_dir / "NOTEEVENT.csv")
else:
notes = pd.read_csv(data_dir / notes_file)
# Loading general admission data
if admissions_file is None:
            admissions = pd.read_csv(data_dir / "ADMISSIONS.csv")
import blpapi
import datetime
import pandas as pd
import numpy as np
def check_date_time(value):
if not isinstance(value, datetime.datetime):
raise ValueError('The dates have to be datetime objects')
return None
def check_overrides(value):
if value != None:
if type(value) != dict:
raise ValueError('The overrides has to be a dictionary')
return None
def check_other_param(value):
if value != None:
if type(value) != dict:
raise ValueError('The other_param argument has to be a dictionary')
return None
class BLP():
def __init__(self):
self.boo_getIntradayBar = False
self.boo_getIntradayTick = False
self.boo_getRefData = False
self.boo_getHistoData = False
self.dictData = {}
self.list_df_buffer = [] # Used to store the temporary dataframes
self.BAR_DATA = blpapi.Name("barData")
self.BAR_TICK_DATA = blpapi.Name("barTickData")
self.CATEGORY = blpapi.Name("category")
self.CLOSE = blpapi.Name("close")
self.FIELD_DATA = blpapi.Name("fieldData")
self.FIELD_ID = blpapi.Name("fieldId")
self.HIGH = blpapi.Name("high")
self.LOW = blpapi.Name("low")
self.MESSAGE = blpapi.Name("message")
self.NUM_EVENTS = blpapi.Name("numEvents")
self.OPEN = blpapi.Name("open")
self.RESPONSE_ERROR = blpapi.Name("responseError")
self.SECURITY_DATA = blpapi.Name("securityData")
self.SECURITY = blpapi.Name("security")
self.SESSION_TERMINATED = blpapi.Name("SessionTerminated")
self.TIME = blpapi.Name("time")
self.VALUE = blpapi.Name("value")
self.VOLUME = blpapi.Name("volume")
self.TICK_DATA = blpapi.Name("tickData")
self.TICK_SIZE = blpapi.Name("size")
self.TYPE = blpapi.Name("type")
# Create a Session
self.session = blpapi.Session()
# Start a Session
if not self.session.start():
print("Failed to start session.")
return None
def printErrorInfo(self, leadingStr, errorInfo):
print ("%s%s (%s)" % (leadingStr, errorInfo.getElementAsString(self.CATEGORY),
errorInfo.getElementAsString(self.MESSAGE)))
return None
def check_service(self, service):
# Open service to get historical data from
if not (self.session.openService(service)):
print("Failed to open {}".format(service))
return None
def set_other_param(self, other_param, request):
if other_param != None:
for k, v in other_param.items():
request.set(k, v)
return request
def set_overrides(self, overrides, request):
if overrides != None:
req_overrides = request.getElement("overrides")
list_overrides = []
for fieldId, value in overrides.items():
list_overrides.append(req_overrides.appendElement())
list_overrides[-1].setElement("fieldId", fieldId)
list_overrides[-1].setElement("value", value)
return request
def eventLoop(self, session):
done = False
while not done:
event = session.nextEvent(20)
if event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
self.processResponseEvent(event)
elif event.eventType() == blpapi.Event.RESPONSE:
self.processResponseEvent(event)
done = True
else:
for msg in event:
if event.eventType() == blpapi.Event.SESSION_STATUS:
if msg.messageType() == self.SESSION_TERMINATED:
done = True
return None
def processResponseEvent(self, event):
for msg in event:
if msg.hasElement(self.RESPONSE_ERROR):
self.printErrorInfo("REQUEST FAILED: ", msg.getElement(self.RESPONSE_ERROR))
continue
if self.boo_getIntradayBar:
self.process_msg_intradaybar(msg)
elif self.boo_getIntradayTick:
self.process_msg_intradaytick(msg)
elif self.boo_getRefData:
self.process_msg_refdata(msg)
elif self.boo_getHistoData:
self.process_msg_histodata(msg)
return None
def get_intradaybar(self, security, event, start_date, end_date, barInterval, other_param):
self.boo_getIntradayBar = True
try:
self.check_service("//blp/refdata")
refDataService = self.session.getService("//blp/refdata")
request = refDataService.createRequest("IntradayBarRequest")
# Only one security/eventType per request
request.set("security", security)
request.set("eventType", event)
request.set("interval", barInterval)
# All times are in GMT
request.set("startDateTime", start_date)
request.set("endDateTime", end_date)
# Append other parameters if there are
request = self.set_other_param(other_param, request)
self.session.sendRequest(request)
self.eventLoop(self.session) # Wait for events from session
finally:
# Stop the session
self.session.stop()
df_buffer = pd.DataFrame.from_dict(self.dictData,
orient='index',
columns=['open', 'high', 'low', 'close', 'volume', 'numEvents', 'value'])
df_buffer['ticker'] = security
df_buffer = df_buffer.reset_index(level=0).rename(columns={'index': 'time'}).set_index(['time', 'ticker'])
return df_buffer.fillna(value=np.nan)
def process_msg_intradaybar(self, msg):
data = msg.getElement(self.BAR_DATA).getElement(self.BAR_TICK_DATA)
for bar in data.values():
time = bar.getElementAsDatetime(self.TIME)
open = bar.getElementAsFloat(self.OPEN)
high = bar.getElementAsFloat(self.HIGH)
low = bar.getElementAsFloat(self.LOW)
close = bar.getElementAsFloat(self.CLOSE)
numEvents = bar.getElementAsInteger(self.NUM_EVENTS)
volume = bar.getElementAsInteger(self.VOLUME)
value = bar.getElementAsInteger(self.VALUE)
self.dictData[time] = [open, high, low, close, volume, numEvents, value] # Increment rows in a dictionary
return None
def get_refdata(self, security, fields, overrides, other_param):
self.boo_getRefData = True
self.fields = fields
try:
self.check_service("//blp/refdata")
refDataService = self.session.getService("//blp/refdata")
request = refDataService.createRequest("ReferenceDataRequest")
# Append securities to request
for ticker in security:
request.append("securities", ticker)
# Append fields to request
for field in fields:
request.append("fields", field)
# Append other parameters if there are
request = self.set_other_param(other_param, request)
# Add overrides if there are
request = self.set_overrides(overrides, request)
self.session.sendRequest(request)
self.eventLoop(self.session) # Wait for events from session.
finally:
self.session.stop()
df_buffer = pd.DataFrame.from_dict(self.dictData, orient='index', columns=fields).fillna(value=np.nan)
return df_buffer
def process_msg_refdata(self, msg):
data = msg.getElement(self.SECURITY_DATA)
for securityData in data.values():
field_data = securityData.getElement(self.FIELD_DATA) # Element that contains all the fields
security_ticker = securityData.getElementAsString(self.SECURITY) # Get Ticker
self.dictData[security_ticker] = [] # Create list of fields
for my_field in self.fields:
if field_data.hasElement(my_field): # Check if the field exists for this particular ticker
self.dictData[security_ticker].append(field_data.getElement(my_field).getValue())
else:
self.dictData[security_ticker].append(None)
return None
def get_histodata(self,security, fields, start_date, end_date, overrides, other_param):
self.boo_getHistoData = True
self.fields = fields
try:
self.check_service("//blp/refdata")
# Obtain previously opened service
refDataService = self.session.getService("//blp/refdata")
# Create and fill the request for the historical data
request = refDataService.createRequest("HistoricalDataRequest")
# Append securities to request
for ticker in security:
request.getElement("securities").appendValue(ticker)
# Append fields to request
for field in fields:
request.getElement("fields").appendValue(field)
request.set("startDate", start_date.strftime('%Y%m%d'))
request.set("endDate", end_date.strftime('%Y%m%d'))
# Append other parameters if there are
request = self.set_other_param(other_param, request)
# Add overrides if there are
request = self.set_overrides(overrides, request)
self.session.sendRequest(request) # Send the request
self.eventLoop(self.session) # Wait for events from session.
finally:
# Stop the session
self.session.stop()
# Returns a pandas dataframe with a Multi-index (date/ticker)
df_buffer = pd.concat(self.list_df_buffer).reset_index(level=0).rename(columns={'index': 'date'}).set_index(['date', 'ticker'])
return df_buffer.fillna(value=np.nan)
def process_msg_histodata(self, msg):
dictData = {} # Used for structuring the data received from bloomberg
security_data = msg.getElement(self.SECURITY_DATA)
data = security_data.getElement(self.FIELD_DATA) # Iterable object that contains all the fields
security_ticker = security_data.getElementAsString(self.SECURITY) # Get Ticker (there is only one ticker by message)
for field_data in data.values(): # Iterate through each date
date = field_data.getElement('date').getValue()
dictData[date] = []
dictData[date].append(security_ticker)
for my_field in self.fields:
if field_data.hasElement(my_field): # Check if the field exists for this particular ticker
dictData[date].append(field_data.getElement(my_field).getValue()) # Increment dictionary
else:
dictData[date].append(None)
# Append data to the list of dataframe (concatenated in the end)
self.list_df_buffer.append(pd.DataFrame.from_dict(dictData, orient='index', columns=['ticker'] + self.fields))
return None
def get_intradaytick(self, ticker, list_events, start_date, end_date, condition_codes, other_param):
self.boo_getIntradayTick = True
try:
self.check_service("//blp/refdata")
refDataService = self.session.getService("//blp/refdata")
request = refDataService.createRequest("IntradayTickRequest")
# only one security/eventType per request
request.set("security", ticker)
# Add fields to request
for event in list_events:
request.getElement("eventTypes").appendValue(event)
# All times are in GMT
request.set("startDateTime", start_date)
request.set("endDateTime", end_date)
# Add condition codes
request.set("includeConditionCodes", condition_codes)
# Append other parameters if there are
request = self.set_other_param(other_param, request)
# Create set of column names if extra columns added to other_param
self.extra_columns = ['conditionCodes'] if condition_codes else []
if other_param != None:
for k,v in other_param.items():
if ('include' in k):
if v:
col_name = k.replace('include', '')
col_name = col_name[:1].lower() + col_name[1:]
self.extra_columns.append(col_name)
self.session.sendRequest(request)
self.eventLoop(self.session)
finally:
# Stop the session
self.session.stop()
df_buffer = pd.DataFrame.from_dict(self.dictData, orient='index', columns=['type', 'value', 'size'] + self.extra_columns)
df_buffer['ticker'] = ticker
df_buffer = df_buffer.reset_index(level=0).rename(columns={'index': 'time'}).set_index(['time', 'ticker'])
return df_buffer.fillna(value=np.nan)
def process_msg_intradaytick(self, msg):
data = msg.getElement(self.TICK_DATA).getElement(self.TICK_DATA)
for item in data.values():
time = item.getElementAsDatetime(self.TIME)
str_type = item.getElementAsString(self.TYPE)
value = item.getElementAsFloat(self.VALUE)
size = item.getElementAsInteger(self.TICK_SIZE)
self.dictData[time] = [str_type, value, size] # Increment rows in a dictionary
extra_data = []
for extra_col in self.extra_columns:
if item.hasElement(extra_col):
extra_data.append(item.getElement(extra_col).getValue())
else:
extra_data.append(None)
self.dictData[time] += extra_data
return None
def IntradayBar(security, event, start_date, end_date, barInterval, other_param=None):
'''
────────────────────────────────────────────────────────────────────────────────────────────────
┌─────────┐ from blp_pandas import blp_pandas as bbg
│ Example │
└─────────┘ df = bbg.IntradayBar(['CAC FP Equity', 'CACX LN Equity'],
                                     'TRADE',
datetime(2018,11,22,9,0),
datetime(2018,11,22,17,30),
1)
:return: pandas dataframe
'''
def get_tickerbar(ticker, event, start_date, end_date, barInterval, other_param):
'''
This nested function is called for each ticker so that the Bloomberg object is destroyed after each request
This is a thread-safe method
'''
objBBG = BLP() # Instantiate object with session
df_ticker = objBBG.get_intradaybar(ticker, event, start_date, end_date, barInterval, other_param) # Get data in dataframe
return df_ticker
#***************************
# Check the input variables
#***************************
check_date_time(start_date)
check_date_time(end_date)
    if (type(barInterval) != int):
        raise ValueError('The bar interval has to be an integer of at least 1')
    elif barInterval < 1:
        raise ValueError('The bar interval has to be an integer of at least 1')
if (type(security) != str) and (type(security) != list):
raise ValueError('The security parameter has to be a string or a list')
if (type(event) != str):
raise ValueError('The event has to be a string')
# ***************************
# Get data
# ***************************
if type(security) == str:
return get_tickerbar(security,event, start_date, end_date, barInterval, other_param)
elif type(security) == list:
listOfDataframes = []
for ticker in security:
listOfDataframes.append(get_tickerbar(ticker, event, start_date, end_date, barInterval, other_param))
        return pd.concat(listOfDataframes)
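# A hedged usage sketch for the optional other_param argument: its keys are passed verbatim
# to request.set(), so they must be valid IntradayBarRequest options. 'gapFillInitialBar' is
# assumed to be such an option here; verify it against your blpapi schema before relying on it.
def example_intradaybar_with_options():
    return IntradayBar('CAC FP Equity', 'TRADE',
                       datetime.datetime(2018, 11, 22, 9, 0),
                       datetime.datetime(2018, 11, 22, 17, 30),
                       5,
                       other_param={'gapFillInitialBar': True})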
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenMetadata MlModel mixin test
"""
from unittest import TestCase
import pandas as pd
import sklearn.datasets as datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from metadata.generated.schema.api.data.createMlModel import CreateMlModelRequest
from metadata.generated.schema.entity.data.mlmodel import MlModel
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
class OMetaModelMixinTest(TestCase):
"""
Test the MlModel integrations from MlModel Mixin
"""
server_config = MetadataServerConfig(api_endpoint="http://localhost:8585/api")
metadata = OpenMetadata(server_config)
iris = datasets.load_iris()
def test_get_sklearn(self):
"""
Check that we can ingest an SKlearn model
"""
        df = pd.DataFrame(self.iris.data, columns=self.iris.feature_names)
"""
This file contains fixtures that are used at multiple points in the tests.
"""
import pytest
import numpy as np
import pandas as pd
from mokapot import LinearPsmDataset
@pytest.fixture(scope="session")
def psm_df_6():
"""A DataFrame containing 6 PSMs"""
data = {
"target": [True, True, True, False, False, False],
"spectrum": [1, 2, 3, 4, 5, 1],
"group": [1, 1, 2, 2, 2, 1],
"peptide": ["a", "b", "a", "c", "d", "e"],
"protein": ["A", "B"] * 3,
"feature_1": [4, 3, 2, 2, 1, 0],
"feature_2": [2, 3, 4, 1, 2, 3],
}
return pd.DataFrame(data)
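# A hedged illustration of how a fixture like psm_df_6 is typically consumed: wrapping the
# frame in a LinearPsmDataset. The keyword names below are assumptions based on mokapot's
# documented constructor and should be checked against the installed mokapot version.
def example_linear_psm_dataset(psm_df):
    return LinearPsmDataset(
        psms=psm_df,
        target_column="target",
        spectrum_columns="spectrum",
        peptide_column="peptide",
        feature_columns=["feature_1", "feature_2"],
    )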
@pytest.fixture()
def psm_df_1000(tmp_path):
"""A DataFrame with 1000 PSMs from 500 spectra and a FASTA file."""
rng = np.random.Generator(np.random.PCG64(42))
targets = {
"target": [True] * 500,
"spectrum": np.arange(500),
"group": rng.choice(2, size=500),
"peptide": [_random_peptide(5, rng) for _ in range(500)],
"score": np.concatenate(
[rng.normal(3, size=200), rng.normal(size=300)]
),
"filename": "test.mzML",
"calcmass": rng.uniform(500, 2000, size=500),
"expmass": rng.uniform(500, 2000, size=500),
"ret_time": rng.uniform(0, 60 * 120, size=500),
"charge": rng.choice([2, 3, 4], size=500),
}
decoys = {
"target": [False] * 500,
"spectrum": np.arange(500),
"group": rng.choice(2, size=500),
"peptide": [_random_peptide(5, rng) for _ in range(500)],
"score": rng.normal(size=500),
"filename": "test.mzML",
"calcmass": rng.uniform(500, 2000, size=500),
"expmass": rng.uniform(500, 2000, size=500),
"ret_time": rng.uniform(0, 60 * 120, size=500),
"charge": rng.choice([2, 3, 4], size=500),
}
fasta_data = "\n".join(
_make_fasta(100, targets["peptide"], 10, rng)
+ _make_fasta(100, decoys["peptide"], 10, rng, "decoy")
)
fasta = tmp_path / "test_1000.fasta"
with open(fasta, "w+") as fasta_ref:
fasta_ref.write(fasta_data)
    return (pd.concat([pd.DataFrame(targets), pd.DataFrame(decoys)]), fasta)
#**************************************************************************************#
# Project: River Node
# Authors: <NAME>
# Department: CIDSE
# Semester: Fall 2016/Spring 2017
# Course Number and Name: CSE 492/493 Honors Thesis
# Supervisors: Dr. <NAME> & Dr. <NAME>
#**************************************************************************************#
#**************************************************************************************#
#**************************************************************************************#
# BROKEN - Cannot animate basemap
#**************************************************************************************#
#**************************************************************************************#
# STANDARD LIBRARIES
import pandas as pd
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.patches as mpatches
import matplotlib.animation as animation
import numpy as np
# MY FILES
from data_calc import *
from list_conversions import *
#from map_data import map_points
import variables
variables.init()
#**************************************************************************************#
# Functions #
#**************************************************************************************#
def update_data():
    feed_data = pd.read_json('https://io.adafruit.com/api/v2/specialKody/feeds/river-node-location-ph/data')
import numpy as np
import os
import pandas as pd
from nose.tools import ok_, eq_, assert_almost_equal, with_setup
from conceptnet5.uri import is_term
from conceptnet5.vectors import get_vector
from conceptnet5.vectors.query import VectorSpaceWrapper
from conceptnet5.vectors.transforms import standardize_row_labels, l1_normalize_columns, \
l2_normalize_rows, shrink_and_sort
DATA = os.environ.get("CONCEPTNET_BUILD_DATA", "testdata")
TEST_FRAME = None
def setup_simple_frame():
data = [[4, 4, 4],
[1, 1, 1],
[1, 2, 10],
[3, 3, 4],
[2, 3, 4],
[2, 3, 5],
[7, 2, 7],
[3, 8, 2]]
index = ['island', 'Island', 'cat', 'figure', 'figure skating', 'figure skater','thing','17']
global TEST_FRAME
TEST_FRAME = pd.DataFrame(data=data, index=index)
def setup_multi_ling_frame():
data = [[8, 10, 3],
[4, 5, 6],
[4, 4, 5],
[10, 6, 12],
[10, 7, 11]]
index = ['/c/pl/kombinacja',
'/c/en/ski_jumping',
'/c/en/nordic_combined',
'/c/en/present',
'/c/en/gift']
global TEST_FRAME
TEST_FRAME = pd.DataFrame(data=data, index=index)
@with_setup(setup_simple_frame)
def test_get_vector():
ok_(get_vector(TEST_FRAME, '/c/en/cat').equals(get_vector(TEST_FRAME, 'cat', 'en')))
@with_setup(setup_simple_frame)
def test_vector_space_wrapper():
"""
Check if VectorSpaceWrapper's index is sorted and its elements are concepts.
"""
wrap = VectorSpaceWrapper(frame=TEST_FRAME)
wrap.load()
ok_(all(is_term(label) for label in wrap.frame.index))
ok_(wrap.frame.index.is_monotonic_increasing)
# test there are no transformations to raw terms other than adding the english tag
ok_('/c/en/figure skater' in wrap.frame.index) # no underscore
ok_('/c/en/Island' in wrap.frame.index) # no case folding
# test index_prefix_range
ok_(wrap.index_prefix_range('/c/en/figure') == (3, 6))
ok_(wrap.index_prefix_range('/c/en/skating') == (0, 0))
# test_similar_terms
ok_('/c/en/figure skating' in wrap.similar_terms('/c/en/figure skating', limit=3).index)
ok_('/c/en/figure skater' in wrap.similar_terms('/c/en/figure skating', limit=3).index)
ok_('/c/en/figure' in wrap.similar_terms('/c/en/figure skating', limit=3).index)
@with_setup(setup_multi_ling_frame)
def test_vector_space_wrapper_filter():
wrap = VectorSpaceWrapper(frame=TEST_FRAME)
wrap.load()
ok_('/c/pl/kombinacja' in wrap.similar_terms('/c/en/nordic_combined', filter='/c/pl',
limit=1).index)
ok_('/c/en/present' in wrap.similar_terms('/c/en/gift', filter='/c/en/present', limit=1).index)
@with_setup(setup_multi_ling_frame)
def test_missing_language():
wrap = VectorSpaceWrapper(frame=TEST_FRAME)
wrap.load()
# The frame contains no Esperanto, of course, so the out-of-vocabulary
# mechanism will fail. We should simply get no results, not crash.
similarity = wrap.similar_terms('/c/eo/ekzemplo')
eq_(len(similarity), 0)
@with_setup(setup_simple_frame)
def test_standardize_row_labels():
vec1 = TEST_FRAME.loc['island']
vec2 = TEST_FRAME.loc['Island']
vec3 = TEST_FRAME.loc['thing']
standardized_vectors = standardize_row_labels(TEST_FRAME)
# Check if all labels are terms
ok_(all(is_term(label) for label in standardized_vectors.index))
# Check if all terms standardized to the same concept are merged
ok_(standardized_vectors.index.is_unique)
ok_('/c/en/Island' not in standardized_vectors.index)
ok_('/c/en/island' in standardized_vectors.index)
ok_('/c/en/thing' in standardized_vectors.index)
    ok_(standardized_vectors.loc['/c/en/island'].equals(pd.Series([3.0, 3.0, 3.0])))
from datetime import date, datetime
import calendar
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Scale the data to be appropriate for PCA
def scale_data(angle_data):
scaled = preprocessing.scale(angle_data)
return scaled
#Perform PCA on the data
def pca(angles, name, dir_name, graph_type):
#Get all the PCA components from the data
pca = PCA()
    pc_angles = angles.drop(angles.index[0])
data = scale_data(pc_angles)
pca.fit(data)
pca_data = pca.transform(data)
per_var = np.round(pca.explained_variance_ratio_ * 100, decimals = 1)
labels = ['PC' + str(x) for x in range(1, len(per_var) + 1)]
#Create a Scree Plot of the components
plt.close()
plt.bar(x = range(1, len(per_var) + 1), height = per_var, tick_label = labels)
plt.ylabel('Percentage of Explained Variance')
plt.xlabel('Principal Component')
plt.title('Scree Plot')
plt.savefig(dir_name + 'Scree Plot - ' + name + '.png')
plt.show()
plt.close()
pca_df = pd.DataFrame(pca_data, columns = labels)
#Generate the PCA Graph based on PC1 and PC2
if graph_type == '2d':
plt.scatter(pca_df.PC1, pca_df.PC2, s = 0.01)
plt.title('Torsion Angle PCA Graph')
plt.xlabel('PC1 - {0}%'.format(per_var[0]))
plt.ylabel('PC2 - {0}%'.format(per_var[1]))
plt.savefig(dir_name + '2D PCA - ' + name + '.png')
plt.close()
#Generate the PCA Graph Based on PC1, PC2, and PC3
elif graph_type == '3d':
ax = plt.axes(projection = '3d')
ax.scatter3D(pca_df.PC1, pca_df.PC2, pca_df.PC3, s = 0.01)
plt.xlabel('PC1 - {0}%'.format(per_var[0]))
plt.ylabel('PC2 - {0}%'.format(per_var[1]))
        ax.set_zlabel('PC3 - {0}%'.format(per_var[2]))
plt.savefig(dir_name + '3D PCA - ' + name + '.png')
plt.close()
else:
raise Exception('Graph Type must be either "2d" or "3d".')
return pca_df, pca
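# A minimal usage sketch, assuming 'angles' is a DataFrame of torsion angles (rows =
# conformations, columns = angle features). The column names and output directory below are
# illustrative; the directory must exist before pca() tries to save its figures into it.
def example_pca_run(out_dir='./plots/'):
    angles = pd.DataFrame(np.random.rand(100, 6),
                          columns=['phi1', 'psi1', 'phi2', 'psi2', 'phi3', 'psi3'])
    pca_df, fitted_pca = pca(angles, 'demo', out_dir, '2d')
    return pca_df, fitted_pca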
def load_score(pca, PC, n):
# loading scores
if PC.lower() == "all":
        all_scores = map(lambda x: pd.Series(pca.components_[x]), range(len(pca.components_)))  # assumed completion: one Series per component
# Copyright (C) 2020 <NAME>, <NAME>
# Code -- Study 1 -- What Personal Information Can a Consumer Facial Image Reveal?
# https://github.com/computationalmarketing/facialanalysis/
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mtick
import matplotlib.image as mpimg
from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'font.size': 12})
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Times']
import seaborn as sns
from textwrap import wrap
import torchvision.models as models
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import os
from os import walk
from tqdm import tqdm
from sklearn.utils import class_weight
from sklearn import metrics, svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.model_selection import KFold, GroupKFold, ShuffleSplit, GroupShuffleSplit
from sklearn.metrics import confusion_matrix
import scipy.stats
from scipy.special import softmax
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import dendrogram, linkage
# ATTENTION: we disable notifications when AUC cannot be computed
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
import json
import numpy as np
from torchvision import transforms
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
import pickle
'''
q_to_name_dict contains match between variable labels from the survey results file and a label of the variable
'''
q_to_name_dict = {#'Q11':'gender', #'Q12':'age', 'Q13':'race', 'Q14':'school', # these variables expanded below
'Q15':'marital_status',
#'Q16':'employment',
'Q17':'social_class', #'Q18':'religion', # NO VARIANCE, SO EXCLUDED 'Q19':'US_born',
'Q21':'body_fitness', #'Q22':'household_income', 'Q23':'zip_code',
'Q24':'orientation',
#'Q25':'political_party',
'Q26':'global_warming', 'Q27':'recycling', 'Q28':'religious',
'Q29':'offensive_ads_banned', 'Q30':'offensive_ads_brand',#'Q31':'facebook_evil',
'Q32':'NRA_support',
'Q34':'bin_family_career', 'Q35':'bin_friendship_laws', 'Q36':'bin_freedom_truth',
'Q37':'bin_pleasure_duty', 'Q38':'bin_wealth_fame', 'Q39':'bin_politeness_honesty',
'Q40':'bin_beautiful_smart', 'Q41':'bin_belonging_independence',
'Q42_1': 'lfstl_set_routine',
'Q42_4': 'lfstl_try_new_things',
'Q42_5': 'lfstl_highly_social_many_friends',
'Q42_6': 'lfstl_buy_new_before_others',
'Q42_7': 'lfstl_outgoing_soc_confident',
'Q42_8': 'lfstl_compulsive_purchases',
'Q42_10': 'lfstl_political_protest_participation',
'Q42_11': 'lfstl_donate_to_beggar',
'Q42_12': 'lfstl_like_hunting',
'Q42_13': 'lfstl_like_fishing',
'Q42_14': 'lfstl_like_hiking',
'Q42_15': 'lfstl_like_out_of_doors',
'Q42_16': 'lfstl_cabin_by_quiet_lake_spend_summer',
'Q42_17': 'lfstl_good_fixing_mechanical_things',
'Q42_18': 'lfstl_repair_my_own_car',
'Q42_19': 'lfstl_like_war_stories',
'Q42_20': 'lfstl_do_better_than_avg_fist_fight',
'Q42_21': 'lfstl_would_want_to_be_prof_football_player',
'Q42_22': 'lfstl_would_like_to_be_policeman',
'Q42_23': 'lfstl_too_much_violence_on_tv',
'Q42_24': 'lfstl_should_be_gun_in_every_home',
'Q42_25': 'lfstl_like_danger',
'Q42_26': 'lfstl_would_like_my_own_airplane',
'Q42_27': 'lfstl_like_to_play_poker',
'Q42_28': 'lfstl_smoke_too_much',
'Q42_29': 'lfstl_love_to_eat',
'Q42_30': 'lfstl_spend_money_on_myself_that_shuld_spend_on_family',
'Q42_31': 'lfstl_if_given_chance_men_would_cheat_on_spouses',
'Q42_33': 'lfstl_satisfied_with_life',
'Q42_34': 'lfstl_like_to_be_in_charge',
'Q42_35': 'lfstl_enjoy_shopping',
'Q42_36': 'lfstl_plan_spending_carefully',
'Q42_37': 'lfstl_obey_rules',
'Q43_1': 'lfstl_satisfied_with_weight',
'Q43_4': 'lfstl_regular_exercise_routine',
'Q43_5': 'lfstl_grew_up_eating_healthy_foods',
'Q43_7': 'lfstl_hard_to_be_disciplined_about_what_i_eat',
'Q43_9': 'lfstl_dont_have_to_worry_how_i_eat',
'Q43_11': 'lfstl_never_think_healthy_unhealthy_food',
'Q43_13': 'lfstl_stick_to_healthy_diet_for_family',
'Q43_14': 'lfstl_choose_snack_foods_that_give_vitamins_minerals',
'Q44_1': 'lfstl_often_prepare_sauces_dips_from_scratch',
'Q44_5': 'lfstl_dont_have_much_interest_cooking',
'Q44_6': 'lfstl_seek_out_healthy_foods',
'Q44_8': 'lfstl_read_ingreadients_list_on_the_label',
'Q44_9': 'lfstl_looking_for_new_products_when_at_grocery_store',
'Q44_11': 'lfstl_lower_priced_products_same_as_higher_priced',
'Q44_13': 'lfstl_look_for_authentic_ingredients_flavors',
'Q44_14': 'lfstl_like_ethnic_foods',
'Q44_15': 'lfstl_daring_adventurous_trying_new_foods',
'Q45_42': 'brkfst_none',
'Q45_43': 'brkfst_bar',
'Q45_44': 'brkfst_fruit',
'Q45_45': 'brkfst_nuts',
'Q45_46': 'brkfst_regular_yogurt',
'Q45_47': 'brkfst_greek_yogurt',
'Q45_48': 'brkfst_muffin_croissant',
'Q45_49': 'brkfst_cold_cereal',
'Q45_50': 'brkfst_hot_cereal_oatmeal',
'Q45_51': 'brkfst_frozen_waffle',
'Q45_52': 'brkfst_cheese_cottage_cheese',
'Q45_53': 'brkfst_sandwhich',
'Q45_54': 'brkfst_salad',
'Q45_55': 'brkfst_eggs',
'Q45_56': 'brkfst_meat',
'Q45_57': 'brkfst_chicken',
'Q45_58': 'brkfst_fish',
'Q45_59': 'brkfst_potatoes',
'Q45_60': 'brkfst_vegetables',
'Q45_61': 'brkfst_soup',
'Q45_62': 'brkfst_pasta',
'Q45_63': 'brkfst_hummus',
'Q45_64': 'brkfst_bread_toast',
'Q45_65': 'brkfst_bagel_roll',
'Q45_66': 'brkfst_chocolate_candy',
'Q45_67': 'brkfst_cake_cookies',
'Q45_68': 'brkfst_chips',
'Q45_69': 'brkfst_crackers',
'Q45_70': 'brkfst_pretzels',
'Q45_71': 'brkfst_smoothie',
'Q45_72': 'brkfst_pastry_buns_fruit_pies',
'Q45_73': 'brkfst_brownies_snack_cakes',
'Q45_74': 'brkfst_popcorn',
'Q45_75': 'brkfst_ice_cream_sorbet',
'Q45_76': 'brkfst_pudding_gelatin',
'Q45_77': 'brkfst_refrig_dip_salsa_guacamole_dairy',
'Q46_1': 'rsn_brkfst_gives_energy',
'Q46_4': 'rsn_brkfst_tide_over_next_meal',
'Q46_5': 'rsn_brkfst_great_taste',
'Q46_6': 'rsn_brkfst_satisfies_craving',
'Q46_7': 'rsn_brkfst_comforting_soothing',
'Q46_8': 'rsn_brkfst_healthy_good_guilt_free',
'Q46_9': 'rsn_brkfst_take_care_of_hunger_filling',
'Q46_10': 'rsn_brkfst_not_too_filling',
'Q46_11': 'rsn_brkfst_fits_with_who_i_am',
'Q46_12': 'rsn_brkfst_helps_relax_reduce_stress',
'Q46_13': 'rsn_brkfst_helps_control_weight',
'Q46_14': 'rsn_brkfst_helps_maintain_mental_focus',
'Q46_15': 'rsn_brkfst_keeps_from_overeating_next_meal',
'Q46_16': 'rsn_brkfst_great_texture',
'Q46_17': 'rsn_brkfst_sweet_taste',
'Q46_18': 'rsn_brkfst_tangy_savory_taste',
'Q46_19': 'rsn_brkfst_chunky_multidim_texture',
'Q46_20': 'rsn_brkfst_smooth_creamy_texture',
'Q46_21': 'rsn_brkfst_gives_protein',
'Q46_22': 'rsn_brkfst_keeps_me_going',
'Q46_23': 'rsn_brkfst_good_food_to_eat_with_others',
'Q46_24': 'rsn_brkfst_keeps_me_on_track',
'Q46_25': 'rsn_brkfst_like_ingredients',
'Q46_26': 'rsn_brkfst_refreshing_taste',
'Q47':'pay_organic', 'Q48':'alcohol', 'Q49':'credit_score',
'Q50_1':'em_happiness', 'Q50_2':'em_stress', 'Q50_3':'em_loneliness',
'Q50_4':'em_jealousy', 'Q50_5':'em_fear', 'Q50_6':'em_hopefulness',
'Q50_7':'em_regret', 'Q50_8':'em_optimism', 'Q50_9':'em_contentness',
'Q50_10':'em_gratitude', 'Q50_11':'em_guilt', 'Q50_12':'em_anger',
'Q50_13':'em_joy', 'Q50_14':'em_contempt', 'Q50_15':'em_disgust',
'Q50_16':'em_sadness', 'Q50_17':'em_surprise', 'Q50_18':'em_vulnerability',
'Q50_19':'em_curiosity', 'Q50_20':'em_warmth',
'Q51':'entertain_freq', 'Q52_1':'post_lik_pos', 'Q52_2':'post_lik_neg',
'Q53':'movie_activ_rec', 'Q54':'rec_lik_ask', 'Q55':'rec_lik_follow',
'Q56_1': 'bp_is_talkative',
'Q56_4': 'bp_tends_to_find_faults_with_others',
'Q56_5': 'bp_does_thorough_job',
'Q56_6': 'bp_is_depressed_blue',
'Q56_7': 'bp_is_original_comes_up_new_ideas',
'Q56_8': 'bp_is_helpful_unselfish',
'Q56_9': 'bp_is_relaxed_handles_stress_well',
'Q56_10': 'bp_is_curious_many_different_things',
'Q56_11': 'bp_is_full_of_energy',
'Q56_12': 'bp_starts_quarrels_with_others',
'Q56_13': 'bp_can_be_tense',
'Q56_14': 'bp_is_ingenious_deep_thinker',
'Q56_15': 'bp_has_forgiving_nature',
'Q56_16': 'bp_tends_to_be_lazy',
'Q56_17': 'bp_is_emotionally_stable_not_easily_upset',
'Q56_18': 'bp_is_inventive',
'Q56_19': 'bp_has_assertive_personality',
'Q56_20': 'bp_can_be_cold_aloof',
'Q56_21': 'bp_perserveres_until_task_finished',
'Q56_22': 'bp_can_be_moody',
'Q56_23': 'bp_values_artistic_aesthetic_experience',
'Q56_24': 'bp_is_sometimes_shy_inhibited',
'Q56_25': 'bp_is_considerate_kind_almost_everything',
'Q56_26': 'bp_does_things_efficiently',
'Q56_27': 'bp_remains_calm_in_tense_situations',
'Q56_28': 'bp_prefers_routine_work',
'Q56_29': 'bp_is_outgoing_sociable',
'Q56_30': 'bp_is_sometimes_rude_to_others',
'Q56_31': 'bp_makes_plans_follows_through',
'Q56_32': 'bp_gets_nervous_easily',
'Q56_33': 'bp_likes_to_reflect_play_with_ideas',
'Q56_39': 'bp_likes_to_cooperate_with_others',
'Q56_40': 'bp_is_easily_distracted',
'Q56_41': 'bp_is_sophisticated_arts_music_literature',
'Q56_42': 'bp_generates_enthusiasm',
'Q56_43': 'bp_is_reliable_worker',
'Q56_44': 'bp_is_reserved',
'Q56_45': 'bp_can_be_somewhat_careless',
'Q56_46': 'bp_tends_to_be_disorganized',
'Q56_47': 'bp_worries_a_lot',
'Q56_48': 'bp_has_active_imagination',
'Q56_49': 'bp_tends_to_be_quiet',
'Q56_50': 'bp_is_generally_trusting',
'Q56_52': 'bp_has_few_artistic_interests',
'Q57_1':'use_facebook', 'Q57_2':'use_twitter', 'Q57_3':'use_netflix',
'Q57_4':'use_spotify', 'Q57_5':'use_apple_music', 'Q57_6':'use_tinder',
'Q57_7':'use_pandora', 'Q57_9':'use_amazon',
'Q57_11':'use_saks', 'Q57_13':'use_dropbox',
'Q57_14':'use_gmail', 'Q57_15':'use_hotmail',
'Q57_16':'use_yahoo', 'Q57_18':'use_github',
'Q57_20':'use_shazam', 'Q57_21':'use_snapchat',
'Q57_22':'use_whatsapp', 'Q57_23':'use_instagram',
'Q57_24':'use_telegram', 'Q57_27':'use_hulu',
'Q57_30':'use_bloomingdales', 'Q57_31':'use_NYT',
'Q57_32':'use_WSJ',
'Q59' : 'netflix_frequent_viewer',
'Q60' : 'netflix_binger',
'Q61' : 'netflix_active_recommender',
'Q62' : 'netflix_intend_to_get',
'Q63':'superbowl', 'Q64_1':'TV_news_trust', 'Q64_2':'Internet_news_trust',
'Q65':'track_news_daily', 'Q66':'read_reviews', #'Q67':'sports_programming',
'Q68':'social_media_time', 'Q69':'social_media_posting', #'Q70':'video_watching',
'Q73':'bin_iphone_galaxy', 'Q74':'bin_clothing_tech', 'Q75':'bin_brand_recogn_not',
'Q76':'bin_chocolate_strawberry', 'Q77':'bin_coke_original_diet',
'Q78':'bin_coke_pepsi', 'Q79':'bin_club_book', 'Q80':'bin_beach_mountain',
'Q81':'bin_story_tell_listen', 'Q82':'bin_capitalism_socialism',
'Q83':'bin_children_not', 'Q84':'bin_thinking_acting', 'Q85':'bin_planning_spontaneity',
'Q86':'bin_trump_hillary', 'Q87':'bin_madonna_lady_gaga', 'Q88':'bin_beatles_michael_jackson',
'Q89':'ec_past_fin_better', 'Q90':'ec_fut_fin_better', 'Q91':'ec_good_times',
'Q92':'ec_depression', 'Q93':'ec_buy',
'Q94_1' : 'price_bicycle',
'Q94_4' : 'price_smartphone',
'Q94_5' : 'price_laptop',
'Q94_6' : 'price_jeans',
'Q94_7' : 'price_sneakers',
'Q94_8' : 'price_microwave',
'Q94_9' : 'price_washing_machine',
'Q94_10' : 'price_office_chair',
'Q95_1' : 'spend_savings_emergencies',
'Q95_3' : 'spend_necessities_bills',
'Q95_4' : 'spend_entertainment_gift_loved_one',
'Q97':'restaurant_ethics', 'Q99':'criminal_ethics', 'source':'data_source',
'Q11_0':'gender_0', 'Q11_1':'gender_1', 'Q11_2':'gender_2',
'Q12_0': 'age_0', 'Q12_1': 'age_1', 'Q12_2': 'age_2',
'Q13_0': 'race_0','Q13_1': 'race_1','Q13_2': 'race_2','Q13_3': 'race_3','Q13_4': 'race_4',
'Q14_0': 'school_0','Q14_1': 'school_1','Q14_2': 'school_2',
'Q16_0': 'employment_0','Q16_1': 'employment_1','Q16_2': 'employment_2',
'Q18_0': 'religion_0','Q18_1': 'religion_1','Q18_2': 'religion_2','Q18_3': 'religion_3',
'Q22_0': 'household_income_0','Q22_1': 'household_income_1', 'Q22_2': 'household_income_2',
'Q23_0': 'zip_code_0','Q23_1': 'zip_code_1', 'Q23_2':'zip_code_2','Q23_3': 'zip_code_3','Q23_4': 'zip_code_4',
'Q25_0': 'political_party_0','Q25_1': 'political_party_1','Q25_2': 'political_party_2',
'Q31_0': 'facebook_evil_0','Q31_1': 'facebook_evil_1', 'Q31_2': 'facebook_evil_2',
'Q67_0': 'sports_programming_0','Q67_1': 'sports_programming_1', 'Q67_2': 'sports_programming_2',
'Q70_0': 'video_watching_0', 'Q70_1': 'video_watching_1', 'Q70_2': 'video_watching_2',
'personality_extraversion':'personality_extraversion',
'personality_agreeableness':'personality_agreeableness',
'personality_conscientiousness':'personality_conscientiousness',
'personality_neuroticism':'personality_neuroticism',
'personality_openness':'personality_openness',
'Q71#1_1' : 'active_consumer_google_news',
'Q71#1_2' : 'active_consumer_yahoo_news',
'Q71#1_3' : 'active_consumer_new_york_times',
'Q71#1_4' : 'active_consumer_wsj',
'Q71#1_5' : 'active_consumer_boston_globe',
'Q71#1_6' : 'active_consumer_cnn',
'Q71#1_7' : 'active_consumer_huffpost',
'Q71#1_8' : 'active_consumer_foxnews',
'Q71#1_10' : 'active_consumer_vice',
'Q71#1_11' : 'active_consumer_chicago_tribune',
'Q71#1_12' : 'active_consumer_breitbart',
'Q71#1_14' : 'active_consumer_washington_post',
'Q71#1_16' : 'active_consumer_bbc_news',
'Q71#1_17' : 'active_consumer_facebook',
'Q71#1_19' : 'active_consumer_twitter',
'Q71#2_1' : 'bias_google_news',
'Q71#2_2' : 'bias_yahoo_news',
'Q71#2_3' : 'bias_new_york_times',
'Q71#2_4' : 'bias_wsj',
'Q71#2_5' : 'bias_boston_globe',
'Q71#2_6' : 'bias_cnn',
'Q71#2_7' : 'bias_huffpost',
'Q71#2_8' : 'bias_foxnews',
'Q71#2_10' : 'bias_vice',
'Q71#2_11' : 'bias_chicago_tribune',
'Q71#2_12' : 'bias_breitbart',
'Q71#2_14' : 'bias_washington_post',
'Q71#2_16' : 'bias_bbc_news',
'Q71#2_17' : 'bias_facebook',
'Q71#2_19' : 'bias_twitter',
'Q6_1_TEXT_0' : 'browser_safari_iphone',
'Q6_1_TEXT_1' : 'browser_chrome',
'Q6_1_TEXT_2' : 'browser_other',
}
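# A hedged usage sketch: the dictionary above is typically applied to rename raw Qualtrics
# question codes to readable variable names. 'survey_df' is an assumed DataFrame of survey
# responses whose columns are the question codes.
def example_rename_survey_columns(survey_df):
    # Columns present in the mapping are renamed; any others are left untouched
    return survey_df.rename(columns=q_to_name_dict)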
image_metrics = {
'rc' : 'red_color',
'gc' : 'green_color',
'bc' : 'blue_color',
'fwhr' : 'face_with_2_height_ratio',
'fwidth' : 'face_width',
'fheight': 'face_height',
'sideeyeratio' : 'face_to_eye_left_right_ratio',
'noseheight' : 'nose_height',
'eyehdiff' : 'eye_height_difference',
'intereyedist': 'inter_eye_difference',
'lipwidth' : 'lip_width',
}
'''
q_to_full_name_dict is similar to q_to_name_dict and contains
match between variable code from the survey results file and a full name of the variable -- used in plotting
'''
q_to_full_name_dict = {'Q15':'Marital status',
'Q17':'Social class',
'Q21':'Body fitness',
'Q24':'Sexual orientation',
'Q26':'Believes global warming is a threat',
'Q27':'Makes effort to recycle',
'Q28':'Considers himself religious',
'Q29':'Believes offensive ads should be banned',
'Q30':'Will stop buying a brand accused of offensive advertising',
'Q32':'Supports National Rifle Association (NRA)',
'Q34':'More important: Family vs. career',
'Q35':'More important: Friendship vs. laws',
'Q36':'More important: Freedom vs. truth',
'Q37':'More important: Pleasure vs. duty',
'Q38':'More important: Wealth vs. fame',
'Q39':'More important: Politeness vs. honesty',
'Q40':'More important: Being beautiful vs. being smart',
'Q41':'More important: Belonging vs. independence',
# Lifestyle
'Q42_1': 'Lifestyle: Prefers a set routine',
'Q42_4': 'Lifestyle: Likes to try new things',
'Q42_5': 'Lifestyle: Is highly social with many friends',
'Q42_6': 'Lifestyle: Buys new things before others',
'Q42_7': 'Lifestyle: Is outgoing and socially confident',
'Q42_8': 'Lifestyle: Tends to make compulsive purchases',
'Q42_10': 'Lifestyle: Is likely to participate in a political protest',
'Q42_11': 'Lifestyle: Is likely to donate to a beggar',
'Q42_12': 'Lifestyle: Likes hunting',
'Q42_13': 'Lifestyle: Likes fishing',
'Q42_14': 'Lifestyle: Likes hiking',
'Q42_15': 'Lifestyle: Likes out of doors',
'Q42_16': 'Lifestyle: Cabin by a quiet lake is a good way to spend summer',
'Q42_17': 'Lifestyle: Is good at fixing mechanical things',
'Q42_18': 'Lifestyle: Repairs his own car',
'Q42_19': 'Lifestyle: Likes war stories',
'Q42_20': 'Lifestyle: Would do better than average in a fist fight',
'Q42_21': 'Lifestyle: Would want to be a professional football player',
'Q42_22': 'Lifestyle: Would like to be policeman',
'Q42_23': 'Lifestyle: Thinks there is too much violence on TV',
'Q42_24': 'Lifestyle: Believes there should be a gun in every home',
'Q42_25': 'Lifestyle: Likes danger',
'Q42_26': 'Lifestyle: Would like his own airplane',
'Q42_27': 'Lifestyle: Likes to play poker',
'Q42_28': 'Lifestyle: Smokes too much',
'Q42_29': 'Lifestyle: Loves to eat',
'Q42_30': 'Lifestyle: Spends money on himself that should be spent on family',
'Q42_31': 'Lifestyle: Believes that if given a chance men would cheat on spouses',
'Q42_33': 'Lifestyle: Is satisfied with life',
'Q42_34': 'Lifestyle: Likes to be in charge',
'Q42_35': 'Lifestyle: Enjoys shopping',
'Q42_36': 'Lifestyle: Plans spending carefully',
'Q42_37': 'Lifestyle: Obeys rules',
'Q43_1': 'Food habits, attitudes: Is satisfied with his weight',
'Q43_4': 'Food habits, attitudes: Follows regular exercise routine',
'Q43_5': 'Food habits, attitudes: Grew up eating healthy foods',
'Q43_7': 'Food habits, attitudes: Finds it hard to be disciplined about what he eats',
'Q43_9': 'Food habits, attitudes: Does not have to worry about how he eats',
'Q43_11': 'Food habits, attitudes: Never thinks of healthy or unhealthy food',
'Q43_13': 'Food habits, attitudes: Sticks to healthy diet for his family',
    'Q43_14': 'Food habits, attitudes: Chooses snack foods that give vitamins and minerals',
'Q44_1': 'Food habits, attitudes: Often prepares sauces, dips from scratch',
'Q44_5': 'Food habits, attitudes: Does not have much interest in cooking',
'Q44_6': 'Food habits, attitudes: Seeks out healthy foods',
'Q44_8': 'Food habits, attitudes: Reads ingredient list on the label',
'Q44_9': 'Food habits, attitudes: Looks for new products when at grocery store',
'Q44_11': 'Food habits, attitudes: Believes lower priced products are the same as higher priced ones',
'Q44_13': 'Food habits, attitudes: Look for authentic ingredients and flavors',
'Q44_14': 'Food habits, attitudes: Likes ethnic foods',
'Q44_15': 'Food habits, attitudes: Is daring, adventurous in trying new foods',
'Q45_42': 'Breakfast food choice: No breakfast',
'Q45_43': 'Breakfast food choice: Bar',
'Q45_44': 'Breakfast food choice: Fruit',
'Q45_45': 'Breakfast food choice: Nuts',
'Q45_46': 'Breakfast food choice: Regular yogurt',
'Q45_47': 'Breakfast food choice: Greek yogurt',
'Q45_48': 'Breakfast food choice: Muffin or croissant',
'Q45_49': 'Breakfast food choice: Cold cereal',
'Q45_50': 'Breakfast food choice: Hot cereal or oatmeal',
    'Q45_51': 'Breakfast food choice: Frozen waffle',
'Q45_52': 'Breakfast food choice: Cheese, cottage cheese',
'Q45_53': 'Breakfast food choice: Sandwich',
'Q45_54': 'Breakfast food choice: Salad',
'Q45_55': 'Breakfast food choice: Eggs',
'Q45_56': 'Breakfast food choice: Meat',
'Q45_57': 'Breakfast food choice: Chicken',
'Q45_58': 'Breakfast food choice: Fish',
'Q45_59': 'Breakfast food choice: Potatoes',
'Q45_60': 'Breakfast food choice: Vegetables',
'Q45_61': 'Breakfast food choice: Soup',
'Q45_62': 'Breakfast food choice: Pasta',
'Q45_63': 'Breakfast food choice: Hummus',
'Q45_64': 'Breakfast food choice: Bread, toast',
'Q45_65': 'Breakfast food choice: Bagel, roll',
'Q45_66': 'Breakfast food choice: Chocolate candy',
'Q45_67': 'Breakfast food choice: Cake, cookies',
'Q45_68': 'Breakfast food choice: Chips',
'Q45_69': 'Breakfast food choice: Crackers',
'Q45_70': 'Breakfast food choice: Pretzels',
'Q45_71': 'Breakfast food choice: Smoothie',
'Q45_72': 'Breakfast food choice: Pastry, buns, fruit pies',
'Q45_73': 'Breakfast food choice: Brownies, snack, cakes',
'Q45_74': 'Breakfast food choice: Popcorn',
'Q45_75': 'Breakfast food choice: Ice cream, sorbet',
'Q45_76': 'Breakfast food choice: Pudding, gelatin',
'Q45_77': 'Breakfast food choice: refrigerated dip (salsa, guacamole, dairy)',
'Q46_1': 'Breakfast food choice motivations: Gives energy',
'Q46_4': 'Breakfast food choice motivations: Tides him over until next meal',
'Q46_5': 'Breakfast food choice motivations: Tastes great',
'Q46_6': 'Breakfast food choice motivations: Satisfies a craving',
'Q46_7': 'Breakfast food choice motivations: Is comforting, soothing',
'Q46_8': 'Breakfast food choice motivations: Healthy, good, guilt free',
'Q46_9': 'Breakfast food choice motivations: Takes care of hunger, is filling',
'Q46_10': 'Breakfast food choice motivations: Is not too filling',
'Q46_11': 'Breakfast food choice motivations: Fits with who he is',
'Q46_12': 'Breakfast food choice motivations: Helps relax, reduce stress',
'Q46_13': 'Breakfast food choice motivations: Helps control weight',
'Q46_14': 'Breakfast food choice motivations: Helps maintain mental focus',
'Q46_15': 'Breakfast food choice motivations: Keeps from overeating during next meal',
'Q46_16': 'Breakfast food choice motivations: Has great texture',
'Q46_17': 'Breakfast food choice motivations: Tastes sweet',
'Q46_18': 'Breakfast food choice motivations: Tastes tangy, savory',
'Q46_19': 'Breakfast food choice motivations: Has chunky, multidimensional texture',
'Q46_20': 'Breakfast food choice motivations: Has smooth, creamy texture',
'Q46_21': 'Breakfast food choice motivations: Gives protein',
'Q46_22': 'Breakfast food choice motivations: Keeps him going',
'Q46_23': 'Breakfast food choice motivations: Is good food to eat with others',
'Q46_24': 'Breakfast food choice motivations: Keeps him on track',
'Q46_25': 'Breakfast food choice motivations: Likes ingredients',
'Q46_26': 'Breakfast food choice motivations: Has refreshing taste',
'Q47':'Is ready to pay more for organic food products',
'Q48':'Is a frequent alcohol consumer',
'Q49':'Missed a credit card payment within last year',
'Q50_1':'Regularly felt emotions: Happiness',
'Q50_2':'Regularly felt emotions: Stress',
'Q50_3':'Regularly felt emotions: Loneliness',
'Q50_4':'Regularly felt emotions: Jealousy',
'Q50_5':'Regularly felt emotions: Fear',
'Q50_6':'Regularly felt emotions: Hopefulness',
'Q50_7':'Regularly felt emotions: Regret',
'Q50_8':'Regularly felt emotions: Optimism',
'Q50_9':'Regularly felt emotions: Contentness',
'Q50_10':'Regularly felt emotions: Gratitude',
'Q50_11':'Regularly felt emotions: Guilt',
'Q50_12':'Regularly felt emotions: Anger',
'Q50_13':'Regularly felt emotions: Joy',
'Q50_14':'Regularly felt emotions: Contempt',
'Q50_15':'Regularly felt emotions: Disgust',
'Q50_16':'Regularly felt emotions: Sadness',
'Q50_17':'Regularly felt emotions: Surprise',
'Q50_18':'Regularly felt emotions: Vulnerability',
'Q50_19':'Regularly felt emotions: Curiosity',
'Q50_20':'Regularly felt emotions: Warmth',
'Q51':'Frequency of entertaining others at home',
'Q52_1':'Likelihood of social media post about positive shopping experience',
'Q52_2':'Likelihood of social media post about negative shopping experience',
'Q53':'Actively recommends movies to watch to friends',
'Q54':'Likelihood of asking a friend for a movie recommendation',
'Q55':'Likelihood of following a movie recommendation from a friend',
'Q56_1': 'Big 5 variable: Is talkative',
'Q56_4': 'Big 5 variable: Tends to find faults with others (reverse)',
'Q56_5': 'Big 5 variable: Does thorough job',
'Q56_6': 'Big 5 variable: Is depressed, blue',
'Q56_7': 'Big 5 variable: Is original, comes up new ideas',
'Q56_8': 'Big 5 variable: Is helpful, unselfish',
'Q56_9': 'Big 5 variable: Is relaxed, handles stress well (reverse)',
'Q56_10': 'Big 5 variable: Is curious about many different things',
'Q56_11': 'Big 5 variable: Is full of energy',
'Q56_12': 'Big 5 variable: Starts quarrels with others (reverse)',
'Q56_13': 'Big 5 variable: Can be tense',
'Q56_14': 'Big 5 variable: Is ingenious, deep thinker',
'Q56_15': 'Big 5 variable: Has forgiving nature',
'Q56_16': 'Big 5 variable: Tends to be lazy (reverse)',
'Q56_17': 'Big 5 variable: Is emotionally stable, not easily upset (reverse)',
'Q56_18': 'Big 5 variable: Is inventive',
'Q56_19': 'Big 5 variable: Has assertive personality',
'Q56_20': 'Big 5 variable: Can be cold, aloof (reverse)',
'Q56_21': 'Big 5 variable: Perseveres until task is finished',
'Q56_22': 'Big 5 variable: Can be moody',
'Q56_23': 'Big 5 variable: Values artistic, aesthetic experience',
'Q56_24': 'Big 5 variable: Is sometimes shy, inhibited (reverse)',
'Q56_25': 'Big 5 variable: Is considerate, kind to almost everyone',
'Q56_26': 'Big 5 variable: Does things efficiently',
'Q56_27': 'Big 5 variable: Remains calm in tense situations (reverse)',
'Q56_28': 'Big 5 variable: Prefers routine work (reverse)',
'Q56_29': 'Big 5 variable: Is outgoing, sociable',
'Q56_30': 'Big 5 variable: Is sometimes rude to others (reverse)',
'Q56_31': 'Big 5 variable: Makes plans and follows through',
'Q56_32': 'Big 5 variable: Gets nervous easily',
'Q56_33': 'Big 5 variable: Likes to reflect, play with ideas',
'Q56_39': 'Big 5 variable: Likes to cooperate with others',
'Q56_40': 'Big 5 variable: Is easily distracted (reverse)',
'Q56_41': 'Big 5 variable: Is sophisticated in arts, music, literature',
'Q56_42': 'Big 5 variable: Generates enthusiasm',
'Q56_43': 'Big 5 variable: Is reliable worker',
'Q56_44': 'Big 5 variable: Is reserved (reverse)',
'Q56_45': 'Big 5 variable: Can be somewhat careless (reverse)',
'Q56_46': 'Big 5 variable: Tends to be disorganized (reverse)',
'Q56_47': 'Big 5 variable: Worries a lot',
'Q56_48': 'Big 5 variable: Has active imagination',
'Q56_49': 'Big 5 variable: Tends to be quiet (reverse)',
'Q56_50': 'Big 5 variable: Is generally trusting',
'Q56_52': 'Big 5 variable: Has few artistic interests (reverse)',
'Q57_1':'Uses Facebook', 'Q57_2':'Uses Twitter', 'Q57_3':'Uses Netflix',
'Q57_4':'Uses Spotify', 'Q57_5':'Uses Apple music', 'Q57_6':'Uses Tinder',
'Q57_7':'Uses Pandora', 'Q57_9':'Uses Amazon',
'Q57_11':'Uses Saks', 'Q57_13':'Uses Dropbox',
'Q57_14':'Uses Gmail', 'Q57_15':'Uses Hotmail',
'Q57_16':'Uses Yahoo', 'Q57_18':'Uses Github',
'Q57_20':'Uses Shazam', 'Q57_21':'Uses Snapchat',
'Q57_22':'Uses Whatsapp', 'Q57_23':'Uses Instagram',
'Q57_24':'Uses Telegram', 'Q57_27':'Uses Hulu',
'Q57_30':'Uses Bloomingdales', 'Q57_31':'Uses NYT',
'Q57_32':'Uses WSJ',
'Q59' : 'Watches Netflix 4 or more days per week',
'Q60' : 'Tends to watch more than 3 hours of Netflix at a time',
'Q61' : 'Likelihood of recommending Netflix to a friend',
'Q62' : 'Intent to get Netflix subscription within 6 months',
'Q63':'Perceived effect of Superbowl ads on choices',
'Q64_1':'Trusts TV news',
'Q64_2':'Trusts Internet news',
'Q65':'Tracks news daily',
'Q66':'Reads product review in detail before purchase', #'Q67':'sports_programming',
'Q68':'Spends 4 hours or more a day on social media',
'Q69':'Frequency of posting on social media', #'Q70':'video_watching',
'Q73':'Prefers: iPhone vs. Galaxy', 'Q74':'Prefers: Clothing vs. tech', 'Q75':'Prefers: Recognizable brand vs. not well-known brand',
'Q76':'Prefers: Chocolate ice cream vs. strawberry ice cream', 'Q77':'Prefers: Original coke vs. diet',
'Q78':'Prefers: Coke vs. Pepsi', 'Q79':'Prefers: Night in club vs. night with a book', 'Q80':'Prefers: Beach vs. mountain',
'Q81':'Prefers: Telling a story vs. listening to a story', 'Q82':'Prefers: Capitalism vs. socialism',
'Q83':'Prefers: Children vs. no children', 'Q84':'Prefers: Thinking vs. acting', 'Q85':'Prefers: Planning vs. spontaneity',
    'Q86':'Prefers: Trump vs. Hillary', 'Q87':'Prefers: Madonna vs. Lady Gaga', 'Q88':'Prefers: Beatles vs. Michael Jackson',
'Q89':'Is better/ worse financially than a year before',
'Q90':'Expects to be better/ worse financially in a year',
'Q91':'Expects good/ bad times financially in the US within a year',
'Q92':'Expects economic depression in the next five years',
'Q93':'Considers it to be a good time to buy a major household item',
'Q94_1' : 'Price sensitivity: Bicycle',
'Q94_4' : 'Price sensitivity: Smartphone',
'Q94_5' : 'Price sensitivity: Laptop',
'Q94_6' : 'Price sensitivity: Jeans',
'Q94_7' : 'Price sensitivity: Sneakers',
'Q94_8' : 'Price sensitivity: Microwave',
'Q94_9' : 'Price sensitivity: Washing machine',
'Q94_10' : 'Price sensitivity: Office chair',
'Q95_1' : 'Windfall income allocation: Savings, emergencies',
'Q95_3' : 'Windfall income allocation: Necessities, bills',
'Q95_4' : 'Windfall income allocation: Gift to a loved one',
'Q97':'Ethics: What right does your friend have to expect you to go easy on her restaurant in your review?',
'Q99':'Ethics: What right does your friend have to expect you to lie in court to protect him?',
'source':'Data source: Qualtrics panel vs. MTurk',
'Q11_0': 'Gender: Male', 'Q11_1':'Gender: Female', 'Q11_2':'Gender: Other',
'Q12_0': 'Age: <=30', 'Q12_1': 'Age: (30; 50] ', 'Q12_2': 'Age: > 50',
'Q13_0': 'Race: Caucasian/ White', 'Q13_1': 'Race: Asian','Q13_2': 'Race: Hispanic/ Latino','Q13_3': 'Race: African American/ Black','Q13_4': 'Race: Other',
'Q14_0': 'Education achieved: High school or less','Q14_1': 'Education achieved: Undergraduate degree','Q14_2': 'Education achieved: Graduate degree',
'Q16_0': 'Employment: Employed/ student','Q16_1': 'Employment: Unemployed, but looking','Q16_2': 'Employment: Unemployed and not looking',
'Q18_0': 'Religious background: Christianity','Q18_1': 'Religious background: Judaism, Islam','Q18_2': 'Religious background: Other (Hinduism, Buddhism, etc.)','Q18_3': 'Religious background: No particular religion',
'Q22_0': 'Household income: <$50K','Q22_1': 'Household income: [$50K,$100K)', 'Q22_2': 'Household income: >=$100K',
'Q23_0': 'ZIP code first digit: 0, 1','Q23_1': 'ZIP code first digit: 2, 3', 'Q23_2':'ZIP code first digit: 4, 5','Q23_3': 'ZIP code first digit: 6, 7','Q23_4': 'ZIP code first digit: 8, 9',
'Q25_0': 'Political party alignment: Republican','Q25_1': 'Political party alignment: Democrat','Q25_2': 'Political party alignment: Independent',
'Q31_0': 'Facebook is good for humanity: Yes','Q31_1': 'Facebook is good for humanity: No', 'Q31_2': 'Facebook is good for humanity: Unsure',
'Q67_0': 'Sports programming hours watched per week: 0','Q67_1': 'Sports programming hours watched per week: (0,8]', 'Q67_2': 'Sports programming hours watched per week: >8',
'Q70_0': 'Prefers to watch videos: Online', 'Q70_1': 'Prefers to watch videos: TV', 'Q70_2': 'Prefers to watch videos: Does not watch videos',
'personality_extraversion':'Big 5 personality: Extraversion',
'personality_agreeableness':'Big 5 personality: Agreeableness',
'personality_conscientiousness':'Big 5 personality: Conscientiousness',
'personality_neuroticism':'Big 5 personality: Neuroticism',
'personality_openness':'Big 5 personality: Openness',
'Q71#1_1' : 'Active consumer: Google news',
'Q71#1_2' : 'Active consumer: Yahoo news',
'Q71#1_3' : 'Active consumer: New York Times',
'Q71#1_4' : 'Active consumer: WSJ',
'Q71#1_5' : 'Active consumer: Boston Globe',
'Q71#1_6' : 'Active consumer: CNN',
'Q71#1_7' : 'Active consumer: Huffpost',
'Q71#1_8' : 'Active consumer: FoxNews',
'Q71#1_10' : 'Active consumer: Vice',
'Q71#1_11' : 'Active consumer: Chicago Tribune',
'Q71#1_12' : 'Active consumer: Breitbart',
'Q71#1_14' : 'Active consumer: Washington Post',
'Q71#1_16' : 'Active consumer: BBC News',
'Q71#1_17' : 'Active consumer: Facebook',
'Q71#1_19' : 'Active consumer: Twitter',
'Q71#2_1' : 'Perception of bias: Google News',
'Q71#2_2' : 'Perception of bias: Yahoo News',
'Q71#2_3' : 'Perception of bias: New York Times',
'Q71#2_4' : 'Perception of bias: WSJ',
'Q71#2_5' : 'Perception of bias: Boston Globe',
'Q71#2_6' : 'Perception of bias: CNN',
'Q71#2_7' : 'Perception of bias: Huffpost',
'Q71#2_8' : 'Perception of bias: FoxNews',
'Q71#2_10' : 'Perception of bias: Vice',
'Q71#2_11' : 'Perception of bias: Chicago Tribune',
'Q71#2_12' : 'Perception of bias: Breitbart',
'Q71#2_14' : 'Perception of bias: Washington Post',
'Q71#2_16' : 'Perception of bias: BBC News',
'Q71#2_17' : 'Perception of bias: Facebook',
'Q71#2_19' : 'Perception of bias: Twitter',
'Q6_1_TEXT_0' : 'Browser: Safari iPhone',
'Q6_1_TEXT_1' : 'Browser: Chrome',
'Q6_1_TEXT_2' : 'Browser: Other',
# 'rc' : 'Color channel: Red',
# 'gc' : 'Color channel: Green',
# 'bc' : 'Color channel: Blue',
# 'fwhr' : 'Face width-to-height ratio',
# 'fwidth' : 'Face width',
# 'fheight': 'Face height',
# 'sideeyeratio' : 'Face-edge to eye distance, left to right ratio',
# 'noseheight' : 'Nose height',
# 'eyehdiff' : 'Eye height difference',
# 'intereyedist': 'Inter-eye difference',
# 'lipwidth' : 'Lip width',
}
'''
var_groups contains a grouping of variables by the categories we identified.
Some variables, such as the data source (Qualtrics vs. MTurk), are not included in the grouping.
'''
var_groups = {
'demographics_biological' : [
'Q11_1', # gender
'Q12_0', 'Q12_1', # age
'Q13_0','Q13_1', 'Q13_2','Q13_3', # race
'Q21', # body fitness
'Q24',# orientation
# 'rc', 'gc', 'bc',# avg. face color
# 'fwhr', 'fwidth', 'fheight',
# 'sideeyeratio', 'noseheight', 'eyehdiff', 'intereyedist', 'lipwidth'
],
'demographics_socio_economic' : [
'Q15', # :'marital_status'
'Q17', #:'social_class'
'Q14_0', 'Q14_1', # school level
'Q16_0', 'Q16_1', # employment status
'Q18_0','Q18_1','Q18_2', # religious
'Q22_0', 'Q22_1', # household income
'Q23_0','Q23_1', 'Q23_2','Q23_3', # zip code
'Q25_0', 'Q25_1'], # political party
'personality' : ['personality_extraversion',
'personality_agreeableness',
'personality_conscientiousness',
'personality_neuroticism',
'personality_openness'
],
'character_ethics' : [
'Q97', #'restaurant_ethics'
'Q99', #'criminal_ethics'
'Q49', #'credit_score',
'Q48', #'alcohol',
],
'lifestyle' : [
'Q42_1',#: 'lfstl_set_routine',
'Q42_4',#: 'lfstl_try_new_things',
'Q42_5',#: 'lfstl_highly_social_many_friends',
'Q42_6',#: 'lfstl_buy_new_before_others',
'Q42_7',#: 'lfstl_outgoing_soc_confident',
'Q42_8',#: 'lfstl_compulsive_purchases',
'Q42_10',#: 'lfstl_political_protest_participation',
'Q42_11',#: 'lfstl_donate_to_beggar',
'Q42_12',#: 'lfstl_like_hunting',
'Q42_13',#: 'lfstl_like_fishing',
'Q42_14',#: 'lfstl_like_hiking',
'Q42_15',#: 'lfstl_like_out_of_doors',
'Q42_16',#: 'lfstl_cabin_by_quiet_lake_spend_summer',
'Q42_17',#: 'lfstl_good_fixing_mechanical_things',
'Q42_18',#: 'lfstl_repair_my_own_car',
'Q42_19',#: 'lfstl_like_war_stories',
'Q42_20',#: 'lfstl_do_better_than_avg_fist_fight',
'Q42_21',#: 'lfstl_would_want_to_be_prof_football_player',
'Q42_22',#: 'lfstl_would_like_to_be_policeman',
'Q42_23',#: 'lfstl_too_much_violence_on_tv',
'Q42_24',#: 'lfstl_should_be_gun_in_every_home',
'Q42_25',#: 'lfstl_like_danger',
'Q42_26',#: 'lfstl_would_like_my_own_airplane',
'Q42_27',#: 'lfstl_like_to_play_poker',
'Q42_28',#: 'lfstl_smoke_too_much',
'Q42_29',#: 'lfstl_love_to_eat',
'Q42_30',#: 'lfstl_spend_money_on_myself_that_shuld_spend_on_family',
'Q42_31',#: 'lfstl_if_given_chance_men_would_cheat_on_spouses',
'Q42_33',#: 'lfstl_satisfied_with_life',
'Q42_34',#: 'lfstl_like_to_be_in_charge',
'Q42_35',#: 'lfstl_enjoy_shopping',
'Q42_36',#: 'lfstl_plan_spending_carefully',
'Q42_37',#: 'lfstl_obey_rules',
],
'food_habits_and_attitudes' : [
'Q43_1',#: 'lfstl_satisfied_with_weight',
'Q43_4',#: 'lfstl_regular_exercise_routine',
'Q43_5',#: 'lfstl_grew_up_eating_healthy_foods',
'Q43_7',#: 'lfstl_hard_to_be_disciplined_about_what_i_eat',
'Q43_9',#: 'lfstl_dont_have_to_worry_how_i_eat',
'Q43_11',#: 'lfstl_never_think_healthy_unhealthy_food',
'Q43_13',#: 'lfstl_stick_to_healthy_diet_for_family',
'Q43_14',#: 'lfstl_choose_snack_foods_that_give_vitamins_minerals',
'Q44_1',#: 'lfstl_often_prepare_sauces_dips_from_scratch',
'Q44_5',#: 'lfstl_dont_have_much_interest_cooking',
'Q44_6',#: 'lfstl_seek_out_healthy_foods',
'Q44_8',#: 'lfstl_read_ingreadients_list_on_the_label',
'Q44_9',#: 'lfstl_looking_for_new_products_when_at_grocery_store',
'Q44_11',#: 'lfstl_lower_priced_products_same_as_higher_priced',
'Q44_13',#: 'lfstl_look_for_authentic_ingredients_flavors',
'Q44_14',#: 'lfstl_like_ethnic_foods',
'Q44_15',#: 'lfstl_daring_adventurous_trying_new_foods',
'Q47',#:'pay_organic',
],
'emotional_state' : [
'Q50_1',#:'em_happiness',
'Q50_2',#:'em_stress',
'Q50_3',#:'em_loneliness',
'Q50_4',#:'em_jealousy',
'Q50_5',#:'em_fear',
'Q50_6',#:'em_hopefulness',
'Q50_7',#:'em_regret',
'Q50_8',#:'em_optimism',
'Q50_9',#:'em_contentness',
'Q50_10',#:'em_gratitude',
'Q50_11',#:'em_guilt',
'Q50_12',#:'em_anger',
'Q50_13',#:'em_joy',
'Q50_14',#:'em_contempt',
'Q50_15',#:'em_disgust',
'Q50_16',#:'em_sadness',
'Q50_17',#:'em_surprise',
'Q50_18',#:'em_vulnerability',
'Q50_19',#:'em_curiosity',
'Q50_20',#:'em_warmth'
],
'values_and_beliefs' : [
'Q26',#:'global_warming',
'Q27',#:'recycling',
'Q28',#:'religious',
'Q29',#:'offensive_ads_banned',
'Q30',#:'offensive_ads_brand',
'Q32',#:'NRA_support',
'Q31_0',#: 'facebook_evil_0',
'Q31_1',#: 'facebook_evil_1',
'Q31_2',#: 'facebook_evil_2',
'Q34',#:'bin_family_career',
'Q35',#:'bin_friendship_laws',
'Q36',#:'bin_freedom_truth',
'Q37',#:'bin_pleasure_duty',
'Q38',#:'bin_wealth_fame',
'Q39',#:'bin_politeness_honesty',
'Q40',#:'bin_beautiful_smart',
'Q41',#:'bin_belonging_independence',
],
'price_sensitivity' : [
'Q94_1',# : 'price_bicycle',
'Q94_4',# : 'price_smartphone',
'Q94_5',# : 'price_laptop',
'Q94_6',# : 'price_jeans',
'Q94_7',# : 'price_sneakers',
'Q94_8',# : 'price_microwave',
'Q94_9',# : 'price_washing_machine',
'Q94_10',# : 'price_office_chair',
],
'breakfast_food_choice' : [
'Q45_42',#: 'brkfst_none',
'Q45_43',#: 'brkfst_bar',
'Q45_44',#: 'brkfst_fruit',
'Q45_45',#: 'brkfst_nuts',
'Q45_46',#: 'brkfst_regular_yogurt',
'Q45_47',#: 'brkfst_greek_yogurt',
'Q45_48',#: 'brkfst_muffin_croissant',
'Q45_49',#: 'brkfst_cold_cereal',
'Q45_50',#: 'brkfst_hot_cereal_oatmeal',
'Q45_51',#: 'brkfst_frozen_waffle',
'Q45_52',#: 'brkfst_cheese_cottage_cheese',
'Q45_53',#: 'brkfst_sandwhich',
'Q45_54',#: 'brkfst_salad',
'Q45_55',#: 'brkfst_eggs',
'Q45_56',#: 'brkfst_meat',
'Q45_57',#: 'brkfst_chicken',
'Q45_58',#: 'brkfst_fish',
'Q45_59',#: 'brkfst_potatoes',
'Q45_60',#: 'brkfst_vegetables',
'Q45_61',#: 'brkfst_soup',
'Q45_62',#: 'brkfst_pasta',
'Q45_63',#: 'brkfst_hummus',
'Q45_64',#: 'brkfst_bread_toast',
'Q45_65',#: 'brkfst_bagel_roll',
'Q45_66',#: 'brkfst_chocolate_candy',
'Q45_67',#: 'brkfst_cake_cookies',
'Q45_68',#: 'brkfst_chips',
'Q45_69',#: 'brkfst_crackers',
'Q45_70',#: 'brkfst_pretzels',
'Q45_71',#: 'brkfst_smoothie',
'Q45_72',#: 'brkfst_pastry_buns_fruit_pies',
'Q45_73',#: 'brkfst_brownies_snack_cakes',
'Q45_74',#: 'brkfst_popcorn',
'Q45_75',#: 'brkfst_ice_cream_sorbet',
'Q45_76',#: 'brkfst_pudding_gelatin',
'Q45_77',#: 'brkfst_refrig_dip_salsa_guacamole_dairy',
],
'breakfast_motivations' : [
'Q46_1',#: 'rsn_brkfst_gives_energy',
'Q46_4',#: 'rsn_brkfst_tide_over_next_meal',
'Q46_5',#: 'rsn_brkfst_great_taste',
'Q46_6',#: 'rsn_brkfst_satisfies_craving',
'Q46_7',#: 'rsn_brkfst_comforting_soothing',
'Q46_8',#: 'rsn_brkfst_healthy_good_guilt_free',
'Q46_9',#: 'rsn_brkfst_take_care_of_hunger_filling',
'Q46_10',#: 'rsn_brkfst_not_too_filling',
'Q46_11',#: 'rsn_brkfst_fits_with_who_i_am',
'Q46_12',#: 'rsn_brkfst_helps_relax_reduce_stress',
'Q46_13',#: 'rsn_brkfst_helps_control_weight',
'Q46_14',#: 'rsn_brkfst_helps_maintain_mental_focus',
'Q46_15',#: 'rsn_brkfst_keeps_from_overeating_next_meal',
'Q46_16',#: 'rsn_brkfst_great_texture',
'Q46_17',#: 'rsn_brkfst_sweet_taste',
'Q46_18',#: 'rsn_brkfst_tangy_savory_taste',
'Q46_19',#: 'rsn_brkfst_chunky_multidim_texture',
'Q46_20',#: 'rsn_brkfst_smooth_creamy_texture',
'Q46_21',#: 'rsn_brkfst_gives_protein',
'Q46_22',#: 'rsn_brkfst_keeps_me_going',
'Q46_23',#: 'rsn_brkfst_good_food_to_eat_with_others',
'Q46_24',#: 'rsn_brkfst_keeps_me_on_track',
'Q46_25',#: 'rsn_brkfst_like_ingredients',
'Q46_26',#: 'rsn_brkfst_refreshing_taste',
],
'product_preferences' : [
'Q73',#:'bin_iphone_galaxy',
'Q74',#:'bin_clothing_tech',
'Q75',#:'bin_brand_recogn_not',
'Q76',#:'bin_chocolate_strawberry',
'Q77',#:'bin_coke_original_diet',
'Q78',#:'bin_coke_pepsi',
'Q79',#:'bin_club_book',
'Q80',#:'bin_beach_mountain',
'Q81',#:'bin_story_tell_listen',
'Q82',#:'bin_capitalism_socialism',
'Q83',#:'bin_children_not',
'Q84',#:'bin_thinking_acting',
'Q85',#:'bin_planning_spontaneity',
'Q86',#:'bin_trump_hillary',
'Q87',#:'bin_madonna_lady_gaga',
'Q88',#:'bin_beatles_michael_jackson',
],
'online_service_usage' : [
'Q57_1',#:'use_facebook',
'Q57_2',#:'use_twitter',
'Q57_3',#:'use_netflix',
'Q57_4',#:'use_spotify',
'Q57_5',#:'use_apple_music',
'Q57_6',#:'use_tinder',
'Q57_7',#:'use_pandora',
'Q57_9',#:'use_amazon',
'Q57_11',#:'use_saks',
'Q57_13',#:'use_dropbox',
'Q57_14',#:'use_gmail',
'Q57_15',#:'use_hotmail',
'Q57_16',#:'use_yahoo',
'Q57_18',#:'use_github',
'Q57_20',#:'use_shazam',
'Q57_21',#:'use_snapchat',
'Q57_22',#:'use_whatsapp',
'Q57_23',#:'use_instagram',
'Q57_24',#:'use_telegram',
'Q57_27',#:'use_hulu',
'Q57_30',#:'use_bloomingdales',
'Q57_31',#:'use_NYT',
'Q57_32',#:'use_WSJ',
],
'browser' : [
'Q6_1_TEXT_0', #: 'Browser: Safari iPhone',
'Q6_1_TEXT_1', #: 'Browser: Chrome',
'Q6_1_TEXT_2', #: 'Browser: Other',
],
'media_source' : [
'Q71#1_1',# : 'active_consumer_google_news',
'Q71#1_2',# : 'active_consumer_yahoo_news',
'Q71#1_3',# : 'active_consumer_new_york_times',
'Q71#1_4',# : 'active_consumer_wsj',
'Q71#1_5',# : 'active_consumer_boston_globe',
'Q71#1_6',# : 'active_consumer_cnn',
'Q71#1_7',# : 'active_consumer_huffpost',
'Q71#1_8',# : 'active_consumer_foxnews',
'Q71#1_10',# : 'active_consumer_vice',
'Q71#1_11',# : 'active_consumer_chicago_tribune',
'Q71#1_12',# : 'active_consumer_breitbart',
'Q71#1_14',# : 'active_consumer_washington_post',
'Q71#1_16',# : 'active_consumer_bbc_news',
'Q71#1_17',# : 'active_consumer_facebook',
'Q71#1_19',# : 'active_consumer_twitter',
],
'media_trust' : [
'Q71#2_1',# : 'bias_google_news',
'Q71#2_2',# : 'bias_yahoo_news',
'Q71#2_3',# : 'bias_new_york_times',
'Q71#2_4',# : 'bias_wsj',
'Q71#2_5',# : 'bias_boston_globe',
'Q71#2_6',# : 'bias_cnn',
'Q71#2_7',# : 'bias_huffpost',
'Q71#2_8',# : 'bias_foxnews',
'Q71#2_10',# : 'bias_vice',
'Q71#2_11',# : 'bias_chicago_tribune',
'Q71#2_12',# : 'bias_breitbart',
'Q71#2_14',# : 'bias_washington_post',
'Q71#2_16',# : 'bias_bbc_news',
'Q71#2_17',# : 'bias_facebook',
'Q71#2_19',# : 'bias_twitter',
'Q64_1',#:'TV_news_trust',
'Q64_2',#:'Internet_news_trust',
],
'economic_outlook' : [
'Q89',#:'ec_past_fin_better',
'Q90',#:'ec_fut_fin_better',
'Q91',#:'ec_good_times',
'Q92',#:'ec_depression',
],
'spend_intentions' :[
'Q93',#:'ec_buy',
'Q95_1',# : 'spend_savings_emergencies',
'Q95_3',# : 'spend_necessities_bills',
'Q95_4',# : 'spend_entertainment_gift_loved_one',
'Q62', #: 'netflix_intend_to_get',
],
'media_consumption_intensity' : [
'Q65',#:'track_news_daily',
'Q68',#:'social_media_time',
'Q69',#:'social_media_posting',
'Q67_0',#: 'sports_programming_0',
'Q67_1',#: 'sports_programming_1',
'Q67_2',#: 'sports_programming_2',
'Q70_0',#: 'video_watching_0',
'Q70_1',#: 'video_watching_1',
'Q70_2',#: 'video_watching_2',
'Q59', #: 'netflix_frequent_viewer',
'Q60', #: 'netflix_binger',
],
'follower_characteristics' : [
'Q63',#:'superbowl',
'Q66',#:'read_reviews',
'Q55',#:'rec_lik_follow'
'Q54',#:'rec_lik_ask',
],
'influencer_characteristics' : [
'Q52_1',#:'post_lik_pos',
'Q52_2',#:'post_lik_neg',
'Q53',#:'movie_activ_rec',
'Q51',#:'entertain_freq'
'Q61', # : 'netflix_active_recommender',
],
}
'''
meta_groups contains labels for the buckets of the variable groups
'''
meta_groups = [
('Demographics', '', 'Biological characteristics', 'demographics_biological'),
('Demographics', '', 'Socio-economic status', 'demographics_socio_economic'),
('General psychographics', '', 'Values and beliefs', 'values_and_beliefs'),
('General psychographics', '', 'Big 5 personalities', 'personality'),
('General psychographics', '', 'Regularly felt emotions', 'emotional_state'),
('General psychographics', '', 'Character and ethical choices', 'character_ethics'),
('General psychographics', '', 'Lifestyle', 'lifestyle'),
('Consumer psychographics', 'Products and services', 'Product preferences', 'product_preferences'),
('Consumer psychographics', 'Products and services', 'Online service use', 'online_service_usage'),
('Consumer psychographics', 'Products and services', 'Browser', 'browser'),
('Consumer psychographics', 'Media', 'Media choice', 'media_source'),
('Consumer psychographics', 'Media', 'Media consumption intensity', 'media_consumption_intensity'),
('Consumer psychographics', 'Media', 'Media trust', 'media_trust'),
('Consumer psychographics', 'Influence', 'Influencer characteristics', 'influencer_characteristics'),
('Consumer psychographics', 'Influence', 'Follower characteristics', 'follower_characteristics'),
('Consumer psychographics', 'Economics', 'Spend intentions', 'spend_intentions'),
('Consumer psychographics', 'Economics', 'Price sensitivity', 'price_sensitivity'),
('Consumer psychographics', 'Economics', 'Economic outlook', 'economic_outlook'),
('Consumer psychographics', 'Food', 'Food habits and attitudes', 'food_habits_and_attitudes'),
('Consumer psychographics', 'Food', 'Breakfast food choice', 'breakfast_food_choice'),
('Consumer psychographics', 'Food', 'Breakfast food choice motivations', 'breakfast_motivations'),
]
meta_groups = pd.DataFrame(meta_groups)
meta_groups.columns = ['l0', 'l1', 'l2', 'l3']
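# Illustrative sketch (not part of the original analysis): the three objects above can be joined
# into one readable catalog with a row per survey variable, its group hierarchy and its label.
# 'variable_catalog' is a hypothetical helper name; it only uses q_to_name_dict, var_groups and meta_groups.
variable_catalog = pd.DataFrame(
[{'l0': row['l0'], 'l1': row['l1'], 'l2': row['l2'], 'group': row['l3'],
'question': q, 'label': q_to_name_dict.get(q, q)}
for _, row in meta_groups.iterrows() for q in var_groups[row['l3']]])
# e.g. rows for 'demographics_biological' pair Q11_1 with 'Gender: Female', Q12_0 with 'Age: <=30', etc.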
'''
CustomDataset object takes care of supplying an observation (image, labels).
It also performs image preprocessing, such as normalization by color channel.
In case of training, it also performs random transformations, such as horizontal flips, resized crops, rotations, and color jitter -- to expand the observation pool.
'''
class CustomDataset(Dataset):
def __init__(self, data, tr = True, cropped=False):
self.data = data
if not cropped:
self.paths = self.data['img_path'].values.astype('str')
else:
self.paths = self.data['img_path_face_only'].values.astype('str')
self.data_len = self.data.shape[0]
self.labels = self.data[q_list].values.astype('int32')
self.image_metrics = self.data[im_list].values.astype('float32')
# transforms
if tr:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([
transforms.RandomResizedCrop(224),
transforms.RandomRotation(20),
transforms.ColorJitter(brightness=0.1,contrast=0.1,saturation=0.1,hue=0.1)], p=0.75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
else:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
def __getitem__(self, index):
img_path = PATH + '/'+ self.paths[index]
img = Image.open(img_path)
img_tensor = self.transforms(img)
label = self.labels[index]
image_metric = self.image_metrics[index]
return (img_tensor, label, image_metric)
def __len__(self):
return self.data_len
#get pretrained resnet50 model
def get_pretrained():
model = models.resnet50(pretrained=True)
return model
#replace last layer
def prepare_for_finetuning(model):
for param in model.parameters():
param.requires_grad = False
param.requires_grad = True
#replacing last layer with new fully connected
model.fc = torch.nn.Linear(model.fc.in_features,n_outs)
return
# create an object that uses CustomDataset object from above to load multiple observations in parallel
def create_dataloader(data,rand=True, cropped=False):
if rand: # shuffle observations
dataset = CustomDataset(data, tr=True, cropped=cropped)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=10, drop_last=False)
else: # load observations in the original order from data
dataset = CustomDataset(data, tr=False, cropped=cropped)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SequentialSampler(dataset), num_workers=10, drop_last=False)
return loader
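# Hedged usage sketch: each batch from a loader is an (image tensor, label, image-metric) triple,
# matching CustomDataset.__getitem__ above; shapes assume the defaults used in this script.
# loader = create_dataloader(data, rand=False)
# imgs, labels, im_metrics = next(iter(loader))
# imgs: [batch_size, 3, 224, 224], labels: [batch_size, len(q_list)], im_metrics: [batch_size, len(im_list)]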
#finetune and save neural net model
def finetune_and_save(loader_train, loader_test):
# loading pretrained model and preparing it for finetuning
model = get_pretrained()
prepare_for_finetuning(model)
if CUDA:
model.cuda()
# optimize only last six layers
layers = list(model.children())
params = list(layers[len(layers)-1].parameters())+list(layers[len(layers)-2].parameters())+list(layers[len(layers)-3].parameters())+list(layers[len(layers)-4].parameters())+list(layers[len(layers)-5].parameters())+list(layers[len(layers)-6].parameters())
optimizer = optim.Adamax(params=params, lr=0.001)
hist = {}
hist['d_labs'] = q_list
hist['train_loss'] = []
hist['val_loss'] = []
hist['train_loss_d'] = []
hist['val_loss_d'] = []
hist['train_auc_d'] = []
hist['val_auc_d'] = []
# train and evaluate
for epoch in range(N_EPOCHS):
train_loss, train_loss_d, train_auc_d = run_epoch(model, loss_f, optimizer, loader_train, update_model = True) # training
eval_loss, eval_loss_d, eval_auc_d = run_epoch(model, loss_f, optimizer, loader_test, update_model = False) # evaluation
#print('epoch: {} \ttrain loss: {:.6f} \tvalidation loss: {:.6f}'.format(epoch, train_loss, eval_loss))
hist['train_loss'].append(train_loss)
hist['val_loss'].append(eval_loss)
hist['train_loss_d'].append(train_loss_d)
hist['val_loss_d'].append(eval_loss_d)
hist['train_auc_d'].append(train_auc_d)
hist['val_auc_d'].append(eval_auc_d)
# # write this
# for i in range(len(q_list)):
# print('variable: {}\t {} \ttrain auc: {:.6f} \tvalidation auc: {:.6f}'.format(
# q_list[i], q_to_name_dict[q_list[i]], train_auc_d[i], eval_auc_d[i]))
with open(RESULTS+'/eval_record.json', 'w') as fjson:
json.dump(hist, fjson)
# saving model
torch.save(model, RESULTS+"/finetuned_model")
return
# function that performs training (or evaluation) over an epoch (a full pass through the data set)
def run_epoch(model, loss_f, optimizer, loader, update_model = False):
if update_model:
model.train()
else:
model.eval()
loss_hist = []
loss_hist_detailed = []
auc_hist_detailed = []
for batch_i, var in tqdm(enumerate(loader)):
loss, loss_detailed, auc_detailed = loss_f(model, var)
if update_model:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_hist.append(loss.data.item())
loss_hist_detailed.append(loss_detailed)
auc_hist_detailed.append(auc_detailed)
loss_detailed = pd.DataFrame(loss_hist_detailed)
loss_detailed.columns = q_list
auc_detailed = pd.DataFrame(auc_hist_detailed)
auc_detailed.columns = q_list
return np.mean(loss_hist).item(), loss_detailed.mean(0).values.tolist(), auc_detailed.mean(0).values.tolist()
# function to compute the loss from a batch of data
def loss_f(model, var):
data, target, _ = var
# data [n, 3, 224, 224]
# target [n, 349]
# image metrics [n, 11]
data, target = Variable(data), Variable(target)
if CUDA:
data, target = data.cuda(), target.cuda()
output = model(data) # [n, 2*349=698]
loss = 0
loss_detailed = []
auc_detailed = []
for i in range(len(q_d_list)):
# load class weight for variable i
w = torch.FloatTensor(class_weights[i])
if CUDA:
w = w.cuda()
# output contains scores for each level of every predicted variable
# q_d_list[i] is number of levels to variable i
# q_d_list_cumsum[i] is a cumulative sum over number of levels for variable i and all variables before it
# all variables ordered as in q_list
# (q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i] then gives exact coordinates of the scores for variable i
# among all scores in the output
temp = F.cross_entropy(output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]], target[:,i].long(), weight=w)
loss_detailed.append(temp.data.item())
loss += temp
# now we calculate AUC
y_true = target[:,i].detach().cpu().numpy() # true label
y_score = output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]].detach().cpu().numpy()[:,1] # score corresponding to level 1
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
auc_detailed.append(metrics.auc(fpr, tpr))
return loss, loss_detailed, auc_detailed
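# Worked toy example (illustration only) of the slicing logic documented above: with three
# variables of 2, 2 and 3 levels, the per-variable score blocks in the output are
_toy_d = [2, 2, 3] # stand-in for q_d_list
_toy_cumsum = np.cumsum(_toy_d) # array([2, 4, 7]), stand-in for q_d_list_cumsum
_toy_slices = [(c - d, c) for d, c in zip(_toy_d, _toy_cumsum)] # [(0, 2), (2, 4), (4, 7)]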
# building class balancing weights as in
# https://datascience.stackexchange.com/questions/13490/how-to-set-class-weights-for-imbalanced-classes-in-keras
def calculate_class_weights(X):
class_weights = []
for i in q_list:
class_weights.append(
class_weight.compute_class_weight('balanced', np.unique(X[i].values), X[i].values))
return class_weights
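# Small illustration (assumes binary 0/1 labels, as in q_list): with 80 zeros and 20 ones the
# 'balanced' heuristic returns n_samples / (n_classes * class_count) = [0.625, 2.5], so the rarer
# class receives the larger weight in the cross-entropy term of loss_f above.
# class_weight.compute_class_weight('balanced', np.array([0, 1]), np.array([0]*80 + [1]*20))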
# extract data from a dataloader as a set of image features X and a set of labels y corresponding to those image features
# can also black out specified areas of the loaded images before extracting the image features -- this is used in our experiments
# when the data loader is deterministic, it loads the same data in the same order on every pass
def extract_data(loader, modelred, blackout=None):
X = []
y = []
z = []
for batch_i, var in tqdm(enumerate(loader)):
data, target, immetr = var
if blackout is not None:
data[:, :, blackout[0]:blackout[1], blackout[2]:blackout[3]] = 0.0
data, target, immetr = Variable(data), Variable(target), Variable(immetr)
if CUDA:
data, target, immetr = data.cuda(), target.cuda(), immetr.cuda()
data_out = modelred(data)
X.append(data_out.detach().cpu().numpy())
y.append(target.detach().cpu().numpy())
z.append(immetr.detach().cpu().numpy())
X = np.vstack(X).squeeze()
y = np.vstack(y)
z = np.vstack(z)
return X, y, z
# function to evaluate a set of trained classifiers using the AUC metric
# 'models' contains classifiers in the order of the binary variables to be predicted -- which are contained in Y
# X is a matrix of covariates
def analytics_lin(models, X, Y):
auc = {}
for i in tqdm(range(Y.shape[1])):
y_true = Y[:,i]
mod = models[i]
# auc
y_prob = mod.predict_proba(X)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_prob)
auc[q_list[i]] = metrics.auc(fpr, tpr)
return auc
# sequentially yield coordinates for blackout in an image
def sliding_window(image_shape, stepSize, windowSize):
# slide a window across the image
for yc in range(0, image_shape[0], stepSize):
for xc in range(0, image_shape[1], stepSize):
# yield the current window
yield (yc, yc + windowSize[1], xc, xc + windowSize[0])
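# Quick check of the generator above (illustration only): a 224x224 image with stepSize=28 and a
# 28x28 window gives the 8x8 grid of non-overlapping patches referred to below, i.e. 64 regions.
_patches = list(sliding_window(image_shape=(224, 224), stepSize=28, windowSize=(28, 28)))
# len(_patches) == 64 and _patches[0] == (0, 28, 0, 28)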
# calculating decrease in AUC when blocking a particular area of an image -- over 8x8 grid placed over the image
def img_area_importance(modelred, models, svd, dat, auc_true):
patch_importance = {}
for (y0, y1, x0, x1) in sliding_window(image_shape=(224,224), stepSize = 28, windowSize=(28,28)):
loader = create_dataloader(dat,rand=False)
# X_modified_raw contains image features extracted from images with a portion of the image blocked
X_modified_raw, Y, _ = extract_data(loader, modelred, (y0, y1, x0, x1))
# image features reduced to 500 via svd
X_modified = svd.transform(X_modified_raw)
auc = analytics_lin(models, X_modified, Y)
patch_importance_q = {} # contains -(decrease in auc after blocking of an image)
for q in q_list:
patch_importance_q[q] = auc_true[q] - auc[q]
patch_importance[(y0, y1, x0, x1)] = patch_importance_q # decrease in auc across all variables -- for the given blocked portion of the image
return patch_importance
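# Hedged post-processing sketch (not in the original script): the dict returned above can be
# reshaped into an 8x8 heatmap per variable; 'patch_importance' and 'q' below are placeholders.
# heat = np.zeros((8, 8))
# for (y0, y1, x0, x1), imp in patch_importance.items():
#     heat[y0 // 28, x0 // 28] = imp[q]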
# START OF THE RUN
torch.set_num_threads(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
N_EPOCHS = 20
FINETUNE = True
CUDA = torch.cuda.is_available()
batch_size=10
PATH = './data'
RESULTS = './results'
os.makedirs(RESULTS, exist_ok=True)
#finetune model just by running this script
data = pd.read_csv(PATH+'/data.csv')
# data summary stats
# data size
data.shape # observations
data['randomID'].unique().shape # users
data[data['source']==1].shape # observations - qualtrics
data['randomID'][data['source']==1].unique().shape # users - qualtrics
data[data['source']==0].shape # observations - mturk
data['randomID'][data['source']==0].unique().shape # users - mturk
# female Q11_1 stats by data source
data['Q11_1'].mean()
data['Q11_1'][data['source']==1].mean() # qualtrics
data['Q11_1'][data['source']==0].mean() # mturk
# Generating a set of useful global constants
# sorted list of variables
q_list = sorted(list(q_to_name_dict.keys()))
q_to_d_dict = {} # number of levels per variable (portions of the code were originally written to support multinomial, not only binary, variables)
random_threshold = {} # random guess threshold
prop = {} # proportion of class 1 in the data (vs. 0)
for i in q_list:
q_to_d_dict[i] = np.unique(data[i]).shape[0]
random_threshold[i] = 1.0/q_to_d_dict[i]
prop[i] = data[i].sum()/data.shape[0]
q_d_list = [q_to_d_dict[q] for q in q_list] # vector containing number of levels per variable -- where variables are ordered as in q_list
q_d_list_cumsum = np.cumsum(q_d_list) # cumulative sum over variable levels
# total number of levels across variables
n_outs=q_d_list_cumsum[-1]
# image metrics
im_list = sorted(list(image_metrics.keys()))
# logistic regression wrapper (second argument is the label vector)
def logistic_regression(Xtr, ytr):
return LogisticRegression(penalty='l2', C=0.05, random_state=0, tol=1e-6, max_iter=int(1e7),
solver='lbfgs', class_weight='balanced').fit(Xtr, ytr)
# train many regressions
def train_eval_regressions(Xtr, Ytr, Xts, Yts):
lin_models = []
for i in tqdm(range(len(q_list))):
clf = logistic_regression(Xtr, Ytr[:,i])
lin_models.append(clf)
auc = analytics_lin(lin_models, Xts, Yts)
return auc, lin_models
# TRAINING
np.random.seed(999)
torch.manual_seed(999)
# load a pretrained resnet-50 network
model = get_pretrained()
# modelred is a subset of model that outputs a vector of image features per image
modelred = torch.nn.Sequential(*list(model.children())[:-1])
modelred.eval()
if CUDA:
modelred.cuda()
n_reps = 20 # number of repeats for 5-fold cross-validation
gkf = KFold(n_splits=5)
results_auc = []
results_patch_importance = []
results_auc_cropped = []
results_auc_demographics = []
results_auc_browser = []
results_auc_shallowfacemetrics = []
results_auc_browser_demographics = []
results_auc_browser_shallowfacemetrics = []
results_auc_demographics_shallowfacemetrics = []
results_auc_browser_demographics_shallowfacemetrics = []
results_auc_all_plus_img = []
results_auc_all_plus_img_cropped = []
# individual IDs
IDs = data['randomID'].unique()
for rep in tqdm(range(n_reps)):
# shuffling every repetition to get new folds via cv procedure
np.random.shuffle(IDs)
data_shuffled = data.sample(frac=1.0) # shuffling observations too
for trainID, testID in tqdm(gkf.split(IDs)):
# extracting split data
data_train = data_shuffled[data_shuffled['randomID'].isin(IDs[trainID])]
data_test = data_shuffled[data_shuffled['randomID'].isin(IDs[testID])]
# calculating class weights to balance data -- in order of q_list
class_weights = calculate_class_weights(data_train)
# creating data loaders
loader_train = create_dataloader(data_train,rand=False)
if FINETUNE:
loader_train_rand = create_dataloader(data_train,rand=True)
loader_test = create_dataloader(data_test,rand=False)
# finetuning model
if FINETUNE:
finetune_and_save(loader_train_rand, loader_test) # saves to RESULTS+"/finetuned_model"
model = torch.load(RESULTS+"/finetuned_model")
modelred = torch.nn.Sequential(*list(model.children())[:-1])
modelred.eval()
if CUDA:
modelred.cuda()
# extracting image features, labels, and ratios calculated from images (used as control)
X_train_raw, Y_train, Z_train = extract_data(loader_train, modelred)
X_test_raw, Y_test, Z_test = extract_data(loader_test, modelred)
# reducing number of features
svd = TruncatedSVD(n_components=500, random_state=0, n_iter=100).fit(X_train_raw)
X_train = svd.transform(X_train_raw)
X_test = svd.transform(X_test_raw)
# creating data loaders - CROPPED
loader_train_cropped = create_dataloader(data_train,rand=False,cropped=True)
loader_test_cropped = create_dataloader(data_test,rand=False,cropped=True)
# extracting image features and labels
X_train_raw_cropped, _, _ = extract_data(loader_train_cropped, modelred)
X_test_raw_cropped, _, _ = extract_data(loader_test_cropped, modelred)
# reducing number of features
svd_cropped = TruncatedSVD(n_components=500, random_state=0, n_iter=100).fit(X_train_raw_cropped)
X_train_cropped = svd_cropped.transform(X_train_raw_cropped)
X_test_cropped = svd_cropped.transform(X_test_raw_cropped)
# variables
demographic_vars = ['Q11_1','Q11_2','Q12_1','Q12_2','Q13_1','Q13_2','Q13_3','Q13_4']
browser_vars = ['Q6_1_TEXT_0', 'Q6_1_TEXT_1']
demographic_index = [ i for i in range(len(q_list)) if q_list[i] in demographic_vars]
browser_index = [ i for i in range(len(q_list)) if q_list[i] in browser_vars]
demographic_browser_index = [ i for i in range(len(q_list)) if q_list[i] in (demographic_vars+browser_vars)]
# TRAINING
# deep image features
auc, lin_models = train_eval_regressions(X_train, Y_train, X_test, Y_test)
results_auc.append(auc)
# heat maps - image area importance
patch_importance = img_area_importance(modelred, lin_models, svd, data_test, auc)
results_patch_importance.append(patch_importance)
# deep image features CROPPED
auc, lin_models = train_eval_regressions(X_train_cropped, Y_train, X_test_cropped, Y_test)
results_auc_cropped.append(auc)
# demographics
auc, lin_models = train_eval_regressions(Y_train[:,demographic_index], Y_train, Y_test[:,demographic_index], Y_test)
results_auc_demographics.append(auc)
# browser
auc, lin_models = train_eval_regressions(Y_train[:,browser_index], Y_train, Y_test[:,browser_index], Y_test)
results_auc_browser.append(auc)
# manual (shallow) facial metrics
auc, lin_models = train_eval_regressions(Z_train, Y_train, Z_test, Y_test)
results_auc_shallowfacemetrics.append(auc)
# browser + demographics
auc, lin_models = train_eval_regressions(Y_train[:,demographic_browser_index], Y_train, Y_test[:,demographic_browser_index], Y_test)
results_auc_browser_demographics.append(auc)
# browser + manual facial metrics
auc, lin_models = train_eval_regressions(np.concatenate([Y_train[:,browser_index], Z_train],1), Y_train,
np.concatenate([Y_test[:,browser_index], Z_test],1), Y_test)
results_auc_browser_shallowfacemetrics.append(auc)
# demographics + manual facial metrics
auc, lin_models = train_eval_regressions(np.concatenate([Y_train[:,demographic_index], Z_train],1), Y_train,
np.concatenate([Y_test[:,demographic_index], Z_test],1), Y_test)
results_auc_demographics_shallowfacemetrics.append(auc)
# browser + demographics + manual facial metrics
auc, lin_models = train_eval_regressions(np.concatenate([Y_train[:,demographic_browser_index], Z_train],1), Y_train,
np.concatenate([Y_test[:,demographic_browser_index], Z_test],1), Y_test)
results_auc_browser_demographics_shallowfacemetrics.append(auc)
# browser + demographics + manual facial metrics + deep image features
auc, lin_models = train_eval_regressions(np.concatenate([X_train, Y_train[:,demographic_browser_index], Z_train],1), Y_train,
np.concatenate([X_test, Y_test[:,demographic_browser_index], Z_test],1), Y_test)
results_auc_all_plus_img.append(auc)
auc, lin_models = train_eval_regressions(np.concatenate([X_train_cropped, Y_train[:,demographic_browser_index], Z_train],1), Y_train,
np.concatenate([X_test_cropped, Y_test[:,demographic_browser_index], Z_test],1), Y_test)
results_auc_all_plus_img_cropped.append(auc)
# saving results
pd.DataFrame(results_auc).to_csv(RESULTS+'/crossvalidation_auc.csv', index=False)
pd.DataFrame(results_auc_cropped).to_csv(RESULTS+'/crossvalidation_auc_cropped.csv', index=False)
pd.DataFrame(results_auc_demographics).to_csv(RESULTS+'/crossvalidation_auc_demographics.csv', index=False)
pd.DataFrame(results_auc_browser)
from pathlib import Path
import itertools
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
DATA_FOLDER = Path("data-ic")
def get_timeline():
df = pd.read_csv(Path("data", "nice_ic_by_day.csv"))
dates = sorted(df["Datum"].unique())
return dates
def export_date(df, data_folder, prefix, data_date=None, label=None):
if data_date:
df_date = df.loc[df["Datum"] == data_date, :]
else:
df_date = df
# export with data date
if label is not None:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}_{label}.csv")
else:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}.csv")
print(f"Export {export_path}")
df_date.to_csv(export_path, index=False)
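# Hedged usage sketch: write one CSV per reporting date; the folder and prefix names here are
# placeholders, not taken from the original repository.
# df = pd.read_csv(Path("data", "nice_ic_by_day.csv"))
# for d in get_timeline():
#     export_date(df, "nice", "ic_by_day", data_date=d, label=d)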
# LCPS data
def main_lcps():
df_reported = pd.read_csv(Path("data", "lcps_ic_country.csv"))
df_reported['Aantal'] = df_reported["Aantal"].astype(pd.Int64Dtype())
dates = sorted(df_reported["Datum"].unique())
for i in dates:
data = {'Datum': [i],
'Land': ['Totaal'], 'Aantal':['NA']}
new = pd.DataFrame(data, columns = ['Datum','Land','Aantal'])
new['Aantal'] = sum(df_reported.loc[df_reported['Datum'] == i, 'Aantal'])
df_reported = df_reported.append(new, ignore_index = True)
df_reported = df_reported.sort_values('Datum', ascending=True)
df_reported['Aantal'] = df_reported["Aantal"].astype(pd.Int64Dtype())
from kfp.v2.dsl import (Dataset, Input, Output)
def calc_market_watch(
date_ref: str,
# comp_result : str,
):
import pandas as pd
import numpy as np
import pandas_gbq # type: ignore
import time
from trading_calendars import get_calendar
cal_krx = get_calendar('XKRX')
from pandas.tseries.offsets import CustomBusinessDay
cbday = CustomBusinessDay(holidays=cal_krx.adhoc_holidays)
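# Illustration (assumption: date_ref is a 'YYYY-MM-DD' string): the N-day screens below keep rows
# on or after date_ref minus (N-1) * cbday, which steps back trading days on the KRX calendar
# (weekends and exchange holidays are skipped) rather than calendar days.
# window_start = pd.Timestamp(date_ref) - (5 - 1) * cbday # first trading day of a 5-day window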
def get_df_market(date_ref, n_before):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
date_ref_b = (pd.Timestamp(date_ref) - pd.Timedelta(n_before, 'd')).strftime('%Y-%m-%d')
sql = f'''
SELECT
*
FROM
`dots-stock.red_lion.df_markets_clust_parti`
WHERE
date between "{date_ref_b}" and "{date_ref_}"
'''
PROJECT_ID = 'dots-stock'
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID, use_bqstorage_api=True)
df = df.drop_duplicates()
return df
df_markets_1 =get_df_market(date_ref, 20)
def get_n_day_straight_up(NN):
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_N_d_up = (df_markets_
[lambda df: df.Open != 0] # an Open price of 0 means the stock did not trade that day
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
)
[lambda df: df.oc > 0]
[lambda df: df.ChagesRatio > 0]
.groupby(['Name'])
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
).index.to_list()
return l_N_d_up
def get_n_day_straight_dn(NN):
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_N_d_up = (df_markets_
[lambda df: df.Open != 0] # an Open price of 0 means the stock did not trade that day
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
)
[lambda df: df.oc < 0]
[lambda df: df.ChagesRatio < 0]
.groupby(['Name'])
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
).index.to_list()
return l_N_d_up
def get_n_day_straight_up_last_dn(NN):
'''Stocks that rose for several consecutive days and then fell on the last day
Return : list of stock names
'''
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_Nd_dn_last_up = (df_markets_
[lambda df: df.Open != 0] # an Open price of 0 means the stock did not trade that day
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
last_day=lambda df: df['date'] == pd.Timestamp(date_ref),
last_day_down =
lambda df: (df.last_day == True) & (df.oc < 0),
rest_day_up =
lambda df: (df.last_day == False) & (df.oc > 0),
both_met =
lambda df: (df.last_day_down | df.rest_day_up),
)
# keep only the rows that satisfy the filter conditions
.loc[lambda df: df.both_met == True]
# group by name and check that the number of qualifying days per stock equals NN
.groupby('Name')
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
# [lambda df: df['Code'] == NN]
).index.to_list()
return l_Nd_dn_last_up
def get_n_day_straight_dn_last_up(NN):
'''Stocks that fell for several consecutive days and then rose on the last day
Return : list of stock names
'''
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_Nd_dn_last_up = (df_markets_
[lambda df: df.Open != 0] # an Open price of 0 means the stock did not trade that day
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
last_day=lambda df: df['date'] == pd.Timestamp(date_ref),
last_day_down =
lambda df: (df.last_day == True) & (df.oc > 0),
rest_day_up =
lambda df: (df.last_day == False) & (df.oc < 0),
both_met =
lambda df: (df.last_day_down | df.rest_day_up),
)
# keep only the rows that satisfy the filter conditions
.loc[lambda df: df.both_met == True]
# group by name and check that the number of qualifying days per stock equals NN
.groupby('Name')
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
# [lambda df: df['Code'] == NN]
).index.to_list()
return l_Nd_dn_last_up
def make_df_with_func_2(func1, date_ref):
df_market_watch = \
| pd.DataFrame(columns=['date_ref','Ndays', 'codes', 'num_codes']) | pandas.DataFrame |
import streamlit as st
import base64
import pandas as pd
import numpy as np
import math # add to requirements
from datetime import timedelta
from json import dumps
from st_utils import texts
from st_utils.viz import make_simulation_chart, prep_tidy_data_to_plot, make_combined_chart, plot_r0
from st_utils.formats import global_format_func
from hospital_queue.confirmation_button import cache_on_button_press
from hospital_queue.queue_simulation import run_queue_simulation
from covid19.utils import get_latest_file
from covid19 import data
from covid19.models import SEIRBayes
from covid19.estimation import ReproductionNumber
FATAL_RATE_BASELINE = 0.0138 # Verity R et al. Estimates of the severity of covid-19 disease. \
# medRxiv 2020.
SAMPLE_SIZE=500
MIN_CASES_TH = 10
MIN_DAYS_r0_ESTIMATE = 14
MIN_DEATH_SUBN = 3
MIN_DATA_BRAZIL = '2020-03-26'
DEFAULT_CITY = 'São Paulo/SP'
DEFAULT_STATE = 'SP'
DEFAULT_PARAMS = {
'fator_subr': 10,
'gamma_inv_dist': (7.0, 14.0, 0.95, 'lognorm'),
'alpha_inv_dist': (4.1, 7.0, 0.95, 'lognorm'),
'r0_dist': (2.5, 6.0, 0.95, 'lognorm'),
# Simulations params
'confirm_admin_rate': .07, # considering the CDC's 2.8% mortality estimate applied to Brazil's age pyramid
'length_of_stay_covid': 9,
'length_of_stay_covid_uti': 8,
'icu_rate': .0, # should be zero once we implement transferring the most severe patients from a regular bed to the ICU \
# when the regular beds fill up first
'icu_rate_after_bed': .25,
'icu_death_rate': .78,
'icu_queue_death_rate': .0,
'queue_death_rate': .0,
'total_beds': 12222,
'total_beds_icu': 2421,
'available_rate': .36,
'available_rate_icu': .36
}
def prepare_for_r0_estimation(df):
return (
df
['newCases']
.asfreq('D')
.fillna(0)
.rename('incidence')
.reset_index()
.rename(columns={'date': 'dates'})
.set_index('dates')
)
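# Hedged illustration: the helper above expects a frame indexed by a DatetimeIndex named 'date'
# with a 'newCases' column, and returns a daily 'incidence' frame indexed by 'dates' -- the shape
# consumed by ReproductionNumber further below. The toy frame here is not real data.
# _toy = pd.DataFrame({'newCases': [1, 0, 3]},
#                     index=pd.DatetimeIndex(pd.date_range('2020-03-26', periods=3), name='date'))
# prepare_for_r0_estimation(_toy)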
@st.cache
def make_brazil_cases(cases_df):
return (cases_df
.stack(level=1)
.sum(axis=1)
.unstack(level=1))
@st.cache
def make_place_options(cases_df, population_df):
return (cases_df
.swaplevel(0,1, axis=1)
['totalCases']
.pipe(lambda df: df >= MIN_CASES_TH)
.any()
.pipe(lambda s: s[s & s.index.isin(population_df.index)])
.index)
@st.cache
def make_date_options(cases_df, place):
return (cases_df
[place]
['totalCases']
.pipe(lambda s: s[s >= MIN_CASES_TH])
[MIN_DATA_BRAZIL:]
.index
.strftime('%Y-%m-%d'))
def make_param_widgets(NEIR0, reported_rate, r0_samples=None, defaults=DEFAULT_PARAMS):
_N0, _EIR0 = map(int, NEIR0)
interval_density = 0.95
family = 'lognorm'
fator_subr = st.sidebar.number_input(
'Taxa de reportagem de infectados. Porcentagem dos infectados que testaram positivo',
min_value=0.0, max_value=100.0, step=1.0,
value=reported_rate)
st.sidebar.markdown('#### Condições iniciais')
N = st.sidebar.number_input('População total (N)',
min_value=0, max_value=1_000_000_000, step=500_000,
value=_N0)
EIR0 = st.sidebar.number_input('Indivíduos que já foram infectados e confirmados',
min_value=0, max_value=1_000_000_000,
value=_EIR0)
st.sidebar.markdown('#### Período de infecção (1/γ) e tempo incubação (1/α)')
gamma_inf = st.sidebar.number_input(
'Limite inferior do período infeccioso médio em dias (1/γ)',
min_value=1.0, max_value=60.0, step=1.0,
value=defaults['gamma_inv_dist'][0])
gamma_sup = st.sidebar.number_input(
'Limite superior do período infeccioso médio em dias (1/γ)',
min_value=1.0, max_value=60.0, step=1.0,
value=defaults['gamma_inv_dist'][1])
alpha_inf = st.sidebar.number_input(
'Limite inferior do tempo de incubação médio em dias (1/α)',
min_value=0.1, max_value=60.0, step=1.0,
value=defaults['alpha_inv_dist'][0])
alpha_sup = st.sidebar.number_input(
'Limite superior do tempo de incubação médio em dias (1/α)',
min_value=0.1, max_value=60.0, step=1.0,
value=defaults['alpha_inv_dist'][1])
st.sidebar.markdown('#### Parâmetros gerais')
t_max = st.sidebar.number_input('Período de simulação em dias (t_max)',
min_value=1, max_value=8*30, step=15,
value=180)
return {'fator_subr': fator_subr,
'alpha_inv_dist': (alpha_inf, alpha_sup, interval_density, family),
'gamma_inv_dist': (gamma_inf, gamma_sup, interval_density, family),
't_max': t_max,
'NEIR0': (N, EIR0)}
def make_param_widgets_hospital_queue(location, w_granularity, defaults=DEFAULT_PARAMS):
def load_beds(ibge_codes):
# hospital beds (leitos)
beds_data = pd.read_csv(get_latest_file('ibge_leitos'))
ibge_codes = pd.Series(ibge_codes).rename('codes_to_filter')
beds_data_filtered = (beds_data[beds_data['codibge'].isin(ibge_codes)]
[['qtd_leitos', 'qtd_uti']]
.sum())
return beds_data_filtered['qtd_leitos'], beds_data_filtered['qtd_uti']
if w_granularity == 'state':
uf = location
qtd_beds, qtd_beds_uci = load_beds(data.get_ibge_codes_uf(uf))
else:
city, uf = location.split("/")
qtd_beds, qtd_beds_uci = load_beds([data.get_ibge_code(city, uf)])
# TODO: Adjust reliable cCFR
# admiss_rate = FATAL_RATE_BASELINE/cCFR
st.sidebar.markdown('---')
st.sidebar.markdown('#### Parâmetros da simulação hospitalar')
confirm_admin_rate = st.sidebar.number_input(
'Porcentagem de confirmados que são hospitalizados (%)',
step=1.0,
min_value=0.0,
max_value=100.0,
value=DEFAULT_PARAMS['confirm_admin_rate']*100)
los_covid = st.sidebar.number_input(
'Tempo de estadia médio no leito comum (dias)',
step=1,
min_value=1,
max_value=100,
value=DEFAULT_PARAMS['length_of_stay_covid'])
los_covid_icu = st.sidebar.number_input(
'Tempo de estadia médio na UTI (dias)',
step=1,
min_value=1,
max_value=100,
value=DEFAULT_PARAMS['length_of_stay_covid_uti'])
icu_rate = st.sidebar.number_input(
'Taxa de pacientes encaminhados para UTI diretamente',
step=.1,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['icu_rate'])
icu_death_rate = st.sidebar.number_input(
'Taxa de mortes após estadia na UTI',
step=.01,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['icu_death_rate'])
icu_queue_death_rate = st.sidebar.number_input(
'Taxa de mortes na fila da UTI',
step=.01,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['icu_queue_death_rate'])
queue_death_rate = st.sidebar.number_input(
'Taxa de mortes na fila dos leitos normais',
step=.01,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['queue_death_rate'])
icu_after_bed = st.sidebar.number_input(
'Taxa de pacientes encaminhados para UTI a partir dos leitos',
step=.1,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['icu_rate_after_bed'])
total_beds = st.sidebar.number_input(
'Quantidade de leitos',
step=1,
min_value=0,
max_value=int(1e7),
value=int(qtd_beds))
total_beds_icu = st.sidebar.number_input(
'Quantidade de leitos de UTI',
step=1,
min_value=0,
max_value=int(1e7),
value=int(qtd_beds_uci))
available_rate = st.sidebar.number_input(
'Proporção de leitos disponíveis',
step=.1,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['available_rate'])
available_rate_icu = st.sidebar.number_input(
'Proporção de leitos de UTI disponíveis',
step=.1,
min_value=.0,
max_value=1.,
value=DEFAULT_PARAMS['available_rate_icu'])
return {"confirm_admin_rate": confirm_admin_rate,
"los_covid": los_covid,
"los_covid_icu": los_covid_icu,
"icu_rate": icu_rate,
"icu_death_rate": icu_death_rate,
"icu_queue_death_rate": icu_queue_death_rate,
"queue_death_rate": queue_death_rate,
"icu_after_bed": icu_after_bed,
"total_beds": total_beds,
"total_beds_icu": total_beds_icu,
"available_rate": available_rate,
"available_rate_icu": available_rate_icu
}
@st.cache
def make_NEIR0(cases_df, population_df, place, date,reported_rate):
N0 = population_df[place]
EIR = cases_df[place]['totalCases'][date]
return (N0, EIR)
def make_download_href(df, params, should_estimate_r0, r0_dist):
_params = {
'subnotification_factor': params['fator_subr'],
'incubation_period': {
'lower_bound': params['alpha_inv_dist'][0],
'upper_bound': params['alpha_inv_dist'][1],
'density_between_bounds': params['alpha_inv_dist'][2]
},
'infectious_period': {
'lower_bound': params['gamma_inv_dist'][0],
'upper_bound': params['gamma_inv_dist'][1],
'density_between_bounds': params['gamma_inv_dist'][2]
},
}
if should_estimate_r0:
_params['reproduction_number'] = {
'samples': list(r0_dist)
}
else:
_params['reproduction_number'] = {
'lower_bound': r0_dist[0],
'upper_bound': r0_dist[1],
'density_between_bounds': r0_dist[2]
}
csv = df.to_csv(index=False)
b64_csv = base64.b64encode(csv.encode()).decode()
b64_params = base64.b64encode(dumps(_params).encode()).decode()
size = (3*len(b64_csv)/4)/(1_024**2)
return f"""
<a download='covid-simulator.3778.care.csv'
href="data:file/csv;base64,{b64_csv}">
Clique para baixar os resultados da simulação em format CSV ({size:.02} MB)
</a><br>
<a download='covid-simulator.3778.care.json'
href="data:file/json;base64,{b64_params}">
Clique para baixar os parâmetros utilizados em formato JSON.
</a>
"""
def make_EI_df(model_output, sample_size, date):
_, E, I, R, t = model_output
size = sample_size*model.params['t_max']
NI = np.add(pd.DataFrame(I).apply(lambda x: x - x.shift(1)).values,
pd.DataFrame(R).apply(lambda x: x - x.shift(1)).values)
df = (pd.DataFrame({'Exposed': E.reshape(size),
'Infected': I.reshape(size),
'Recovered': R.reshape(size),
'Newly Infected': NI.reshape(size),
'Run': np.arange(size) % sample_size}
).assign(Day=lambda df: (df['Run'] == 0).cumsum() - 1))
return df.assign(
Date=df['Day'].apply(lambda x: pd.to_datetime(date) + timedelta(days=x)))
def plot_EI(model_output, scale):
_, E, I, _, t = model_output
source = prep_tidy_data_to_plot(E, I, t)
return make_combined_chart(source,
scale=scale,
show_uncertainty=True)
@cache_on_button_press('Simular Modelo de Filas')
def run_queue_model(model_output, cases_df, w_place, w_date, params_simulation):
bar_text = st.empty()
bar_text.text('Estimando crecscimento de infectados...')
simulations_outputs = []
dataset, cut_after = calculate_input_hospital_queue(model_output , cases_df, w_place, w_date)
for execution_column, execution_description in [('newly_infected_lower', 'Otimista'),
('newly_infected_mean', 'Médio'),
('newly_infected_upper', 'Pessimista')]:
# TODO bug: review the order of magnitude of all parameters (make sure it is consistent)
dataset = dataset.assign(hospitalizados=0)
for idx, row in dataset.iterrows():
if idx < cut_after:
dataset['hospitalizados'].iloc[idx] = round(dataset[execution_column].iloc[idx] * params_simulation['confirm_admin_rate']/reported_rate)
else:
dataset['hospitalizados'].iloc[idx] = round(dataset[execution_column].iloc[idx] * (params_simulation['confirm_admin_rate']/100))
# dataset = dataset.assign(hospitalizados=round(dataset[execution_column]\
# *params_simulation['confirm_admin_rate']*reported_rate/1000))
# st.write("input modelo")
# st.write(dataset.tail())
bar_text = st.empty()
bar = st.progress(0)
bar_text.text(f'Processando o cenário {execution_description.lower()}...')
simulation_output = (run_queue_simulation(dataset, bar, bar_text, params_simulation)
.join(dataset, how='inner'))
simulations_outputs.append((execution_columnm, execution_description, simulation_output))
bar.progress(1.)
bar_text.text(f"Processamento do cenário {execution_description.lower()} finalizado.")
return simulations_outputs, cut_after
def calculate_input_hospital_queue(model_output, cases_df, place, date):
S, E, I, R, t = model_output
previous_cases = cases_df[place]
# Formatting previous dates
all_dates = pd.date_range(start=MIN_DATA_BRAZIL, end=date).strftime('%Y-%m-%d')
all_dates_df = pd.DataFrame(index=all_dates,
data={"dummy": np.zeros(len(all_dates))})
previous_cases = all_dates_df.join(previous_cases, how='left')['newCases']
cut_after = previous_cases.shape[0]
# Calculating newly infected for all samples
size = sample_size*model.params['t_max']
NI = np.add(pd.DataFrame(I).apply(lambda x: x - x.shift(1)).values,
pd.DataFrame(R).apply(lambda x: x - x.shift(1)).values)
pred = (pd.DataFrame({'Newly Infected': NI.reshape(size),
'Run': np.arange(size) % sample_size,
'Day': np.floor(np.arange(size) / sample_size) + 1}))
pred = pred.assign(day=pred['Day'].apply(lambda x: pd.to_datetime(date) + timedelta(days=(x-1))))
# Calculating standard deviation and mean
def droplevel_col_index(df: pd.DataFrame):
df.columns = df.columns.droplevel()
return df
df = (pred[['Newly Infected', 'day']]
.groupby("day")
.agg({"Newly Infected": [np.mean, np.std]})
.pipe(droplevel_col_index)
.assign(upper=lambda df: df["mean"] + df["std"])
.assign(lower=lambda df: df["mean"] - df["std"])
.add_prefix("newly_infected_")
.join(previous_cases, how='outer')
)
# Formatting the final output
df = (df
.assign(newly_infected_mean=df['newly_infected_mean'].combine_first(df['newCases']))
.assign(newly_infected_upper=df['newly_infected_upper'].combine_first(df['newCases']))
.assign(newly_infected_lower=df['newly_infected_lower'].combine_first(df['newCases']))
.assign(newly_infected_lower=lambda df: df['newly_infected_lower'].clip(lower=0))
.drop(columns=['newCases', 'newly_infected_std'])
.reset_index()
.rename(columns={'index':'day'}))
return df, cut_after
def estimate_r0(cases_df, place, sample_size, min_days, w_date):
used_brazil = False
incidence = (
cases_df
[place]
.query("totalCases > @MIN_CASES_TH")
.pipe(prepare_for_r0_estimation)
[:w_date]
)
if len(incidence) < MIN_DAYS_r0_ESTIMATE:
used_brazil = True
incidence = (
make_brazil_cases(cases_df)
.pipe(prepare_for_r0_estimation)
[:w_date]
)
Rt = ReproductionNumber(incidence=incidence,
prior_shape=5.12, prior_scale=0.64,
si_pars={'mean': 4.89, 'sd': 1.48},
window_width=MIN_DAYS_r0_ESTIMATE - 2)
Rt.compute_posterior_parameters()
samples = Rt.sample_from_posterior(sample_size=sample_size)
return samples, used_brazil
def make_r0_widgets(defaults=DEFAULT_PARAMS):
r0_inf = st.number_input(
'Limite inferior do número básico de reprodução médio (R0)',
min_value=0.01, max_value=10.0, step=0.25,
value=defaults['r0_dist'][0])
r0_sup = st.number_input(
'Limite superior do número básico de reprodução médio (R0)',
min_value=0.01, max_value=10.0, step=0.25,
value=defaults['r0_dist'][1])
return r0_inf, r0_sup, .95, 'lognorm'
def estimate_subnotification(cases_df, place, date,w_granularity):
if w_granularity == 'city':
city_deaths, city_cases = data.get_city_deaths(place,date)
state = city_cases['state'][0]
if city_deaths < MIN_DEATH_SUBN:
place = state
w_granularity = 'state'
if w_granularity == 'state':
state_deaths, state_cases = data.get_state_cases_and_deaths(place,date)
if state_deaths < MIN_DEATH_SUBN:
w_granularity = 'brazil'
if w_granularity == 'city':
previous_cases = cases_df[place][:date]
# Formatting previous dates
all_dates = pd.date_range(start=MIN_DATA_BRAZIL, end=date)
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
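# Hedged usage sketch (mirrors the factory structure above): every validate_* function returns an
# (ErrorDefinition, checker) pair, and the checker maps a dict of loaded tables to the offending
# row indices per table; 'dfs' below stands in for those tables.
# error_165, check_165 = validate_165()
# check_165(dfs) # e.g. {'Header': [...], 'OC3': [...]}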
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
        affected_fields=['DATE_PLACED_CEASED', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
            # convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
            # convert DECOM to datetime, drop missing/invalid values, then sort by CHILD and DECOM
            episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import utilities as util
import sklearn.model_selection as ms
from sklearn.neighbors import KNeighborsClassifier as knnC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import GridSearchCV  # sklearn.grid_search was removed; GridSearchCV now lives in model_selection
import time
import csv
import os
"""
Takes as input historical prices for the mutual funds available to the investor.
Outputs an N-day forecast for each mutual fund.
- loads historical data
- transforms historical data for learning
- new features are technical indicators such as Bollinger Bands, Momentum, etc.
"""
def standard_score(df):
# Function to standardize a dataframe to mean of 0 and standard deviation of 1
mean = df.mean()
sd = df.std()
return (df - mean) / sd
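# Hedged usage example for standard_score (hypothetical data): each column of the result
# has mean ~0 and standard deviation ~1.
#   scaled = standard_score(pd.DataFrame({'close': [10.0, 11.0, 12.0, 13.0]}))
#   scaled['close'].mean(), scaled['close'].std()  # -> (~0.0, ~1.0)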
def melt_indicators(indicator):
    return pd.melt(indicator)
import os
import subprocess
from functools import cached_property
import chess
import chess.engine
import click
import pandas as pd
from chess.engine import _parse_uci_info
from features.abstract import Features
STOCKFISH_PATH = os.environ.get("STOCKFISH_PATH", "../Stockfish/src/stockfish")
EVAL_STOCKFISH_PATH = os.environ.get(
"EVAL_STOCKFISH_PATH", "../Stockfish\ copy/src/stockfish"
)
def stockfish_info(fen, move, engine, depth, multipv=None):
board = chess.Board(fen)
move = [chess.Move.from_uci(move)] if move else None
return engine.analyse(
board, root_moves=move, multipv=multipv, limit=chess.engine.Limit(depth=depth)
)
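# Hedged usage sketch for stockfish_info (assumes STOCKFISH_PATH points at a working
# UCI binary; the position and depth below are illustrative):
#   engine = chess.engine.SimpleEngine.popen_uci(STOCKFISH_PATH)
#   info = stockfish_info(chess.Board().fen(), move=None, engine=engine, depth=10)
#   print(info["score"], info["pv"][0])
#   engine.quit()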
class Stockfish(Features):
# TODO: create a stockfish class that uses popen and catches all depth evals and best moves.
# TODO: add a version that takes users move and analyzes it.
def __init__(self, fen, engine, depth, multipv):
self.info = stockfish_info(
fen=fen, move=None, engine=engine, depth=depth, multipv=multipv,
)
@classmethod
def from_row(cls, row, engine):
return cls(row.fen, engine)
@classmethod
def from_df(cls, df):
engine = chess.engine.SimpleEngine.popen_uci(STOCKFISH_PATH)
feature_rows = []
with click.progressbar(tuple(df.itertuples()), label=cls.__name__) as rows:
for row in rows:
feature_instance = cls.from_row(row, engine)
feature_rows.append(feature_instance.features())
engine.quit()
return pd.DataFrame(feature_rows)
@cached_property
def best_score(self):
return self.info["score"].relative.score()
@cached_property
def best_mate(self):
return self.info["score"].relative.mate()
@cached_property
def best_move(self):
return self.info["pv"][0].uci()
@cached_property
def best_pv(self):
return str([move.uci() for move in self.info["pv"]])
class Stockfish10(Stockfish):
def __init__(self, fen, engine):
super().__init__(fen, engine, 10, None)
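# Hedged sketch: building Stockfish10 features for a DataFrame with a 'fen' column (the
# column name follows from_row above; the single starting-position row is hypothetical):
#   df = pd.DataFrame({'fen': [chess.Board().fen()]})
#   features_df = Stockfish10.from_df(df)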
class StockfishDepth(Features):
def __init__(self, fen, p):
p.stdin.write("position fen {}\n".format(fen))
p.stdin.write("go depth 10\n")
board = chess.Board(fen)
self.scores = []
self.mates = []
self.moves = []
self.pvs = []
for line in iter(p.stdout.readline, ""):
if "bestmove" in line:
break
info = _parse_uci_info(line.strip(), board)
self.scores.append(info["score"].relative.score())
self.mates.append(info["score"].relative.mate())
self.moves.append(info["pv"][0].uci())
self.pvs.append(str([move.uci() for move in info["pv"]]))
@classmethod
def from_row(cls, row, p):
return cls(row.fen, p)
@classmethod
def from_df(cls, df):
p = subprocess.Popen(
STOCKFISH_PATH,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
bufsize=1,
)
p.stdout.readline() # read info line on init.
feature_rows = []
with click.progressbar(tuple(df.itertuples()), label=cls.__name__) as rows:
for row in rows:
feature_instance = cls.from_row(row, p)
feature_rows.append(feature_instance.features())
p.kill()
        return pd.DataFrame(feature_rows)
import altair as alt
from matplotlib.colors import to_rgba
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import six
def build_dataframe(fields):
field_names = {}
data = pd.DataFrame()
for name, field in six.iteritems(fields):
if field is not None:
if isinstance(field, pd.Series):
fname = field.name
else:
fname = name
data[fname] = field
field_names[name] = fname
else:
field_names[name] = None
return data, field_names
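# Hedged illustration of build_dataframe: named Series keep their own name, other fields
# fall back to the field key, and None fields are recorded as None.
#   data, names = build_dataframe({'x': pd.Series([1, 2], name='price'), 'hue': None})
#   # names -> {'x': 'price', 'hue': None}; data has a single column 'price'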
def dtype_to_vega_type(t):
if t == np.dtype('datetime64[ns]'):
return 'temporal'
if t == np.float64 or t == np.int64:
return 'quantitative'
return 'nominal'
def size_chart(chart, size, aspect):
dpi = mpl.rcParams['figure.dpi']
if size:
if isinstance(chart, alt.FacetChart):
chart = chart.spec
chart.height = size*dpi
chart.width = aspect*size*dpi
def vega_color(color):
if isinstance(color, six.string_types) and (color.startswith('rgb(') or color.startswith('rgba(')):
return color
c = to_rgba(color)
return "rgba(%s,%s,%s,%s)" % (int(c[0]*255), int(c[1]*255), int(c[2]*255), c[3])
def vega_palette(palette, color=None, saturation=1, vega_type="nominal"):
if palette:
if isinstance(palette, mpl.colors.Colormap):
pal = palette.colors
else:
pal = sns.color_palette(palette)
elif color:
pal = [color]
elif vega_type == "nominal":
pal = sns.color_palette()
else:
pal = sns.cubehelix_palette(0, as_cmap=True).colors
if saturation < 1:
pal = sns.color_palette(pal, desat=saturation)
pal = sns.color_palette(pal)
return [vega_color(c) for c in pal]
def vega_semantic_type(data):
try:
        float_data = data.astype(float)  # np.float was removed from NumPy; use the builtin
values = np.unique(float_data.dropna())
if np.array_equal(values, np.array([0., 1.])):
return "nominal"
return "quantitative"
except (ValueError, TypeError):
return "nominal"
# From seaborn.categorical
def infer_orient(x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
# Correct way, but does not exist in older Pandas
try:
                return pd.api.types.is_categorical_dtype(s)
# Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as pltcolors
import matplotlib.cm as cmx
import click
from ciml import listener
from ciml import gather_results
import datetime
import itertools
import os
import queue
import re
import sys
import warnings
warnings.filterwarnings("ignore")
try:
from ciml import nn_trainer
from ciml import svm_trainer
from ciml import tf_trainer
except ImportError:
print("Warning: could not import CIML trainers")
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
try:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.training import proximal_adagrad
_OPTIMIZER_CLS_NAMES = {
'Adagrad': adagrad.AdagradOptimizer,
'Adam': adam.AdamOptimizer,
'Ftrl': ftrl.FtrlOptimizer,
'RMSProp': rmsprop.RMSPropOptimizer,
'SGD': gradient_descent.GradientDescentOptimizer,
'ProximalAdagrad': proximal_adagrad.ProximalAdagradOptimizer
}
except ImportError:
print("Warning: could not import Tensorflow")
_OPTIMIZER_CLS_NAMES = {}
default_db_uri = ('mysql+pymysql://query:<EMAIL>@logstash.<EMAIL>/'
'subunit2sql')
def fixed_lenght_example(result, normalized_length=5500,
aggregation_functions=None):
"""Normalize one example.
Normalize one example of data to a fixed length (L).
The input is s x d.
    To achieve fixed length:
- if aggregation functions are provided, apply them, or else
- if s > L, cut each dstat column data to L
- if s < L, pad with zeros to reach L
The output is a pd.DataFrame with shape (L, d)
"""
# Fix length of dataset
example = result['dstat']
init_len = len(example)
dstat_keys = example.keys()
if aggregation_functions:
# Run all aggregation functions on each DataFrame column in the example
agg_dict = {column: [x(example[column]) for x in aggregation_functions]
for column
in example.columns}
example = pd.DataFrame.from_dict(agg_dict)
else:
# Cut or pad with zeros
if init_len > normalized_length:
example = example[:normalized_length]
elif init_len < normalized_length:
pad_length = normalized_length - init_len
padd = pd.DataFrame(0, index=np.arange(
pad_length), columns=dstat_keys)
example = pd.concat([example, padd])
return example
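# Hedged illustration of the padding branch above (hypothetical dstat frame with 3 samples):
#   short = {'dstat': pd.DataFrame({'usr': [1, 2, 3], 'sys': [0, 0, 1]})}
#   fixed_lenght_example(short, normalized_length=5)  # -> 5 x 2 frame, zero-padded at the end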
def unroll_example(example, normalized_length=5500):
"""Unroll one example
Unroll one example with shape (L, d) to a pd.Series with shape (L * d,)
Labels for the input example are an array with shape (d, ), e.g.:
['usr', 'sys', ... , 'clo']
Labels for the output example are an array with shape (L * d, ), e.g.:
['usr1', ... , 'usrL', ... , 'clo1', ... , 'cloN']
f = L * d is the number of features for the model.
"""
# Unroll the examples
np_vector = example.values.flatten('F')
return pd.Series(np_vector)
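# Hedged illustration of unroll_example: a 2 x 2 frame is flattened column-major ('F'),
# so all samples of the first feature come before those of the second.
#   unroll_example(pd.DataFrame({'usr': [1, 2], 'sys': [3, 4]}))  # -> pd.Series([1, 2, 3, 4])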
def get_class(result, class_label='status'):
"""Get a normalized result for the specified class.
Get a normalized result for the specified class. Currently supported
classes are only one, 'status'. This returns a single value which
defines the class the example belongs to.
"""
if class_label == 'status':
status = result['status']
passed_statuses = [0, 'Success']
status = 0 if status in passed_statuses else 1
return status
elif class_label == 'node_provider':
provider = result['node_provider']
if provider.startswith('rax'):
return 'rax'
elif provider.startswith('ovh'):
return 'ovh'
elif provider.startswith('vexxhost'):
return 'vexxhost'
else:
return provider
elif class_label == 'node_provider_all':
return result['node_provider']
else:
return result[class_label]
def normalize_example(result, normalized_length=5500, class_label='status'):
"""Normalize and unroll one example.
Invokes fixed_lenght_example and unroll_example.
Returns the unrolled vector, the single integer that represent that status
for the example, and the list of labels.
"""
example = fixed_lenght_example(result, normalized_length)
# Normalize status
status = get_class(result, class_label)
vector = unroll_example(example, normalized_length)
return vector, status
def filter_example(result, features_regex):
"""Filters the dstat data by features_regex"""
# Apply the dstat feature filter
dstat_data = result['dstat']
col_regex = re.compile(features_regex)
result['dstat'] = dstat_data[list(filter(
col_regex.search, dstat_data.columns))]
return result
def unroll_labels(dstat_labels, normalized_length=5500):
"""Build labels for the unrolled example from labels and num of samples"""
return [label + str(idx) for label, idx in itertools.product(
dstat_labels, range(normalized_length))]
def unroll_labels_names(dstat_labels, aggregation_functions):
"""Build labels for the unrolled example from lables and agg fns"""
return [label + '_' + fn for label, fn in itertools.product(
dstat_labels, aggregation_functions)]
def examples_ndarray(num_examples, num_dstat_features, normalized_length):
# Setup the numpy matrix and sizes (this is done once)
return np.ndarray(
shape=(num_examples, num_dstat_features * normalized_length))
def normalize_dataset(examples, labels, params=None):
"""Normalize features in a dataset
Normalize each feature in a dataset. If e is the number of examples we have
in the dataset, and f is the number of features, this takes as input an
np ndarray with shape (e, f).
The output is an np ndarray with shape (e, f) where data for each feature
is normalized based on values across the examples.
The normalization formula is x = (x - mean(X)/(max(X) - min(X)), where X
is the vector of feature values across examples, and x is any element of X.
"""
_features = np.ndarray(shape=(examples.shape[1], examples.shape[0]))
params = params or {}
for n in range(len(labels)):
print("Normalizing feature %d of %d" % (
n + 1, len(labels)), end='\r', flush=True)
feature_data = examples[:, n]
if labels[n] in params:
mean_fd, max_min_fd = params[labels[n]]
else:
mean_fd = np.mean(feature_data)
max_min_fd = np.max(feature_data) - np.min(feature_data)
# In case of just one example, or
if max_min_fd == 0:
max_min_fd = 1
params[labels[n]] = (mean_fd, max_min_fd)
_features[n] = list(
map(lambda x: (x - mean_fd) / max_min_fd, feature_data))
print(flush=True)
return _features.transpose(), params
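# A hedged, vectorised equivalent of the per-feature loop above (illustrative only; the
# loop version is kept because it also builds the per-label params dict):
#   means = examples.mean(axis=0)
#   spans = examples.max(axis=0) - examples.min(axis=0)
#   spans[spans == 0] = 1
#   normalized = (examples - means) / spans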
def get_downsampled_example_lenght(sample_interval, normalized_length=5500):
"""Returns the normalized lenght for a downsampled example
Returns the normalized example lenght based on the normalized lenght for
a full sample and the sample interval.
"""
rng = pd.date_range('1/1/2012', periods=normalized_length, freq='S')
ts = pd.Series(np.ones(len(rng)), index=rng)
ts = ts.resample(sample_interval).sum()
return ts.shape[0]
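# Example: with 5500 one-second samples, a '10s' resample interval keeps 550 rows.
#   get_downsampled_example_lenght('10s', normalized_length=5500)  # -> 550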
def data_sizes_and_labels(sample_run, features_regex, sample_interval='1s',
aggregation_functions=None, data_path=None, s3=None):
"""Takes a sample run from a dataset and filters and does calculations
If aggregation functions are used, the number of features is the
number of aggregation functions x the number of dstat features
Returns:
    - the normalized example length
- the number of dstat features
- the unrolled labels
"""
    # Normalized length before resampling
normalized_length = 5500
if aggregation_functions:
normalized_length = len(aggregation_functions)
elif sample_interval:
        # Calculate the desired normalized length after resampling
normalized_length = get_downsampled_example_lenght(
sample_interval, normalized_length)
# Load the list of runs and base labels
sample_result = gather_results.get_subunit_results_for_run(
sample_run, sample_interval, data_path=data_path, s3=s3)
filtered_sample_result = filter_example(sample_result, features_regex)
filtered_dstat_labels = filtered_sample_result['dstat'].columns
if aggregation_functions:
unrolled_labels = unroll_labels_names(filtered_dstat_labels,
aggregation_functions)
else:
unrolled_labels = unroll_labels(filtered_dstat_labels,
normalized_length)
return normalized_length, len(filtered_dstat_labels), unrolled_labels
def prepare_dataset(dataset, normalized_length, num_dstat_features, data_type,
features_regex, sample_interval='1s', class_label='status',
aggregation_functions=None, visualize=False, data_path=None,
target_data_path=None, s3=None):
"""Takes a dataset and filters and does the magic
Loads the run ids from the dataset configuration.
Loads the data (dsv + meta) for every run from cache.
Builds the unrolled examples as a numpy ndarray.
Builds the classes as a numpy array.
Saves the data setup to the dataset config.
Does some visualization (if enabled).
"""
if visualize:
data_plots_folder = [os.path.dirname(
os.path.realpath(__file__)), os.pardir, 'data', dataset, 'plots']
os.makedirs(os.sep.join(data_plots_folder), exist_ok=True)
# Load the list of runs and base labels
runs = gather_results.load_run_uuids(dataset, name=data_type,
data_path=target_data_path, s3=s3)
# run_uuids are the example_ids
sizes = []
# The data for each example.
examples = examples_ndarray(len(runs), num_dstat_features,
normalized_length)
# The test result for each example
classes = []
skips = []
print("Loading %s data:" % data_type, end='\r', flush=True)
for count, run in enumerate(runs):
print("Loading %s data: %d of %d" % (data_type, count + 1, len(runs)),
end='\r', flush=True)
result = gather_results.get_subunit_results_for_run(
run, sample_interval, data_path=data_path, s3=s3)
# For one run_uuid we must only get on example (result)
# Filtering by columns
if not result:
skips.append(run.uuid)
continue
# Apply column filtering
result = filter_example(result, features_regex)
# Normalize data
example = fixed_lenght_example(result, normalized_length,
aggregation_functions)
vector = unroll_example(example, normalized_length)
# Normalize status
status = get_class(result, class_label)
# Examples is an np ndarrays
examples[count] = vector.values
classes.append(status)
# Plot from figures
if visualize and not aggregation_functions:
# Prepare some more data if we are going to visualize
sizes.append((result['dstat'].shape[0], status))
figure_name = sample_interval + "_%s_" + str(count)
# Plot un-normalized data
data_plot = result['dstat'].plot()
fig = data_plot.get_figure()
fig.savefig(os.sep.join(
data_plots_folder + [figure_name % "downsampled"]))
plt.close(fig)
# Plot fixed size data
fixed_plot = example.plot()
fig = fixed_plot.get_figure()
fig.savefig(os.sep.join(
data_plots_folder + [figure_name % "fixedsize"]))
plt.close(fig)
# Plot unrolled data
            unrolled_plot = pd.Series(vector)
# Adapted from https://github.com/mirnylab/cooler
import simplejson as json
import six
import os
import re
from contextlib import contextmanager
from pandas.api.types import is_integer_dtype
from scipy.sparse import coo_matrix
import numpy as np
import pandas as pd
import h5py
# The 4DN data portal and hic2cool store these weight vectors in divisive form
_4DN_DIVISIVE_WEIGHTS = {"KR", "VC", "VC_SQRT"}
@contextmanager
def open_hdf5(fp, mode="r", *args, **kwargs):
"""
Context manager like ``h5py.File`` but accepts already open HDF5 file
handles which do not get closed on teardown.
Parameters
----------
fp : str or ``h5py.File`` object
If an open file object is provided, it passes through unchanged,
provided that the requested mode is compatible.
If a filepath is passed, the context manager will close the file on
tear down.
mode : str
* r Readonly, file must exist
* r+ Read/write, file must exist
* a Read/write if exists, create otherwise
* w Truncate if exists, create otherwise
* w- or x Fail if exists, create otherwise
"""
if isinstance(fp, six.string_types):
own_fh = True
fh = h5py.File(fp, mode, *args, **kwargs)
else:
own_fh = False
if mode == "r" and fp.file.mode == "r+":
# warnings.warn("File object provided is writeable but intent is read-only")
pass
elif mode in ("r+", "a") and fp.file.mode == "r":
raise ValueError("File object provided is not writeable")
elif mode == "w":
raise ValueError("Cannot truncate open file")
elif mode in ("w-", "x"):
raise ValueError("File exists")
fh = fp
try:
yield fh
finally:
if own_fh:
fh.close()
class closing_hdf5(h5py.Group):
def __init__(self, grp):
super(closing_hdf5, self).__init__(grp.id)
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.file.close()
def close(self):
self.file.close()
class TreeNode(object):
def __init__(self, obj, depth=0, level=None):
self.obj = obj
self.depth = depth
self.level = level
def get_type(self):
return type(self.obj).__name__
def get_children(self):
if hasattr(self.obj, "values"):
if self.level is None or self.depth < self.level:
depth = self.depth + 1
children = self.obj.values()
return [
self.__class__(o, depth=depth, level=self.level) for o in children
]
return []
def get_text(self):
name = self.obj.name.split("/")[-1] or "/"
if hasattr(self.obj, "shape"):
name += " {} {}".format(self.obj.shape, self.obj.dtype)
return name
MAGIC = u"HDF5::Cooler"
URL = u"https://github.com/mirnylab/cooler"
def _is_cooler(grp):
fmt = grp.attrs.get("format", None)
url = grp.attrs.get("format-url", None)
if fmt == MAGIC or url == URL:
keys = ("chroms", "bins", "pixels", "indexes")
if not all(name in grp.keys() for name in keys):
print("Cooler path {} appears to be corrupt".format(grp.name))
return True
return False
def visititems(group, func, level=None):
"""Like :py:method:`h5py.Group.visititems`, but much faster somehow.
"""
def _visititems(node, func, result=None):
children = node.get_children()
if children:
for child in children:
result[child.obj.name] = func(child.obj.name, child.obj)
_visititems(child, func, result)
return result
root = TreeNode(group, level=level)
return _visititems(root, func, {})
def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)):
return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
def natsorted(iterable):
return sorted(iterable, key=natsort_key)
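# Example: natural ordering keeps numeric parts in numeric order, so 'chr2' sorts
# before 'chr10' (plain lexicographic sorting would not).
#   natsorted(['chr10', 'chr2', 'chr1'])  # -> ['chr1', 'chr2', 'chr10']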
def list_coolers(filepath):
"""
List group paths to all cooler data collections in a file.
Parameters
----------
filepath : str
Returns
-------
list
Cooler group paths in the file.
"""
if not h5py.is_hdf5(filepath):
raise OSError("'{}' is not an HDF5 file.".format(filepath))
listing = []
def _check_cooler(pth, grp):
if _is_cooler(grp):
listing.append("/" + pth if not pth.startswith("/") else pth)
with h5py.File(filepath, "r") as f:
_check_cooler("/", f)
visititems(f, _check_cooler)
return natsorted(listing)
def parse_cooler_uri(s):
"""
Parse a Cooler URI string
e.g. /path/to/mycoolers.cool::/path/to/cooler
"""
parts = s.split("::")
if len(parts) == 1:
file_path, group_path = parts[0], "/"
elif len(parts) == 2:
file_path, group_path = parts
if not group_path.startswith("/"):
group_path = "/" + group_path
else:
raise ValueError("Invalid Cooler URI string")
return file_path, group_path
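# Example (hypothetical file): a URI with a group path splits into its two parts, and a
# bare file path maps to the root group '/'.
#   parse_cooler_uri('test.mcool::resolutions/10000')  # -> ('test.mcool', '/resolutions/10000')
#   parse_cooler_uri('test.cool')                      # -> ('test.cool', '/')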
def parse_humanized(s):
_NUMERIC_RE = re.compile("([0-9,.]+)")
_, value, unit = _NUMERIC_RE.split(s.replace(",", ""))
if not len(unit):
return int(value)
value = float(value)
unit = unit.upper().strip()
if unit in ("K", "KB"):
value *= 1000
elif unit in ("M", "MB"):
value *= 1000000
elif unit in ("G", "GB"):
value *= 1000000000
else:
raise ValueError("Unknown unit '{}'".format(unit))
return int(value)
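# Examples of the humanized-number parsing above:
#   parse_humanized('10,000')  # -> 10000
#   parse_humanized('1.5Mb')   # -> 1500000
#   parse_humanized('25kb')    # -> 25000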
def parse_region_string(s):
"""
Parse a UCSC-style genomic region string into a triple.
Parameters
----------
s : str
UCSC-style string, e.g. "chr5:10,100,000-30,000,000". Ensembl and FASTA
style sequence names are allowed. End coordinate must be greater than
or equal to start.
Returns
-------
(str, int or None, int or None)
"""
def _tokenize(s):
token_spec = [
("HYPHEN", r"-"),
("COORD", r"[0-9,]+(\.[0-9]*)?(?:[a-z]+)?"),
("OTHER", r".+"),
]
tok_regex = r"\s*" + r"|\s*".join(r"(?P<%s>%s)" % pair for pair in token_spec)
tok_regex = re.compile(tok_regex, re.IGNORECASE)
for match in tok_regex.finditer(s):
typ = match.lastgroup
yield typ, match.group(typ)
def _check_token(typ, token, expected):
if typ is None:
raise ValueError("Expected {} token missing".format(" or ".join(expected)))
else:
if typ not in expected:
raise ValueError('Unexpected token "{}"'.format(token))
def _expect(tokens):
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["COORD"])
start = parse_humanized(token)
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["HYPHEN"])
typ, token = next(tokens, (None, None))
if typ is None:
return start, None
_check_token(typ, token, ["COORD"])
end = parse_humanized(token)
if end < start:
raise ValueError("End coordinate less than start")
return start, end
parts = s.split(":")
chrom = parts[0].strip()
if not len(chrom):
raise ValueError("Chromosome name cannot be empty")
if len(parts) < 2:
return (chrom, None, None)
start, end = _expect(_tokenize(parts[1]))
return (chrom, start, end)
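# Example: a UCSC-style region string parses into a (chrom, start, end) triple; the end
# coordinate is optional.
#   parse_region_string('chr5:10,100,000-30,000,000')  # -> ('chr5', 10100000, 30000000)
#   parse_region_string('chr5')                        # -> ('chr5', None, None)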
def parse_region(reg, chromsizes=None):
"""
Genomic regions are represented as half-open intervals (0-based starts,
1-based ends) along the length coordinate of a contig/scaffold/chromosome.
Parameters
----------
reg : str or tuple
UCSC-style genomic region string, or
Triple (chrom, start, end), where ``start`` or ``end`` may be ``None``.
chromsizes : mapping, optional
Lookup table of scaffold lengths to check against ``chrom`` and the
``end`` coordinate. Required if ``end`` is not supplied.
Returns
-------
A well-formed genomic region triple (str, int, int)
"""
if isinstance(reg, six.string_types):
chrom, start, end = parse_region_string(reg)
else:
chrom, start, end = reg
start = int(start) if start is not None else start
end = int(end) if end is not None else end
try:
clen = chromsizes[chrom] if chromsizes is not None else None
except KeyError:
raise ValueError("Unknown sequence label: {}".format(chrom))
start = 0 if start is None else start
if end is None:
if clen is None: # TODO --- remove?
raise ValueError("Cannot determine end coordinate.")
end = clen
if end < start:
raise ValueError("End cannot be less than start")
if start < 0 or (clen is not None and end > clen):
raise ValueError("Genomic region out of bounds: [{}, {})".format(start, end))
return chrom, start, end
class Cooler(object):
"""
A convenient interface to a cooler data collection.
Parameters
----------
store : str, :py:class:`h5py.File` or :py:class:`h5py.Group`
Path to a cooler file, URI string, or open handle to the root HDF5
group of a cooler data collection.
root : str, optional [deprecated]
HDF5 Group path to root of cooler group if ``store`` is a file.
This option is deprecated. Instead, use a URI string of the form
:file:`<file_path>::<group_path>`.
kwargs : optional
Options to be passed to :py:class:`h5py.File()` upon every access.
By default, the file is opened with the default driver and mode='r'.
Notes
-----
If ``store`` is a file path, the file will be opened temporarily in
when performing operations. This allows :py:class:`Cooler` objects to be
serialized for multiprocess and distributed computations.
Metadata is accessible as a dictionary through the :py:attr:`info`
property.
Table selectors, created using :py:meth:`chroms`, :py:meth:`bins`, and
:py:meth:`pixels`, perform range queries over table rows,
returning :py:class:`pd.DataFrame` and :py:class:`pd.Series`.
A matrix selector, created using :py:meth:`matrix`, performs 2D matrix
range queries, returning :py:class:`numpy.ndarray` or
:py:class:`scipy.sparse.coo_matrix`.
"""
def __init__(self, store, root=None, **kwargs):
if isinstance(store, six.string_types):
if root is None:
self.filename, self.root = parse_cooler_uri(store)
elif h5py.is_hdf5(store):
with open_hdf5(store, **kwargs) as h5:
self.filename = h5.file.filename
self.root = root
else:
raise ValueError("Not a valid path to a Cooler file")
self.uri = self.filename + "::" + self.root
self.store = self.filename
self.open_kws = kwargs
else:
# Assume an open HDF5 handle, ignore open_kws
self.filename = store.file.filename
self.root = store.name
self.uri = self.filename + "::" + self.root
self.store = store.file
self.open_kws = {}
self._refresh()
def _refresh(self):
try:
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
_ct = chroms(grp)
_ct["name"] = _ct["name"].astype(object)
self._chromsizes = _ct.set_index("name")["length"]
self._chromids = dict(zip(_ct["name"], range(len(_ct))))
self._info = info(grp)
mode = self._info.get("storage-mode", u"symmetric-upper")
self._is_symm_upper = mode == u"symmetric-upper"
except KeyError:
err_msg = "No cooler found at: {}.".format(self.store)
listing = list_coolers(self.store)
if len(listing):
err_msg += (
" Coolers found in {}. ".format(listing)
+ "Use '::' to specify a group path"
)
raise KeyError(err_msg)
def _load_dset(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return grp[path][:]
def _load_attrs(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return dict(grp[path].attrs)
def open(self, mode="r", **kwargs):
""" Open the HDF5 group containing the Cooler with :py:mod:`h5py`
Functions as a context manager. Any ``open_kws`` passed during
construction are ignored.
Parameters
----------
mode : str, optional [default: 'r']
* ``'r'`` (readonly)
* ``'r+'`` or ``'a'`` (read/write)
Notes
-----
For other parameters, see :py:class:`h5py.File`.
"""
grp = h5py.File(self.filename, mode, **kwargs)[self.root]
return closing_hdf5(grp)
@property
def storage_mode(self):
"""Indicates whether ordinary sparse matrix encoding is used
(``"square"``) or whether a symmetric matrix is encoded by storing only
the upper triangular elements (``"symmetric-upper"``).
"""
return self._info.get("storage-mode", u"symmetric-upper")
@property
def binsize(self):
""" Resolution in base pairs if uniform else None """
return self._info["bin-size"]
@property
def chromsizes(self):
""" Ordered mapping of reference sequences to their lengths in bp """
return self._chromsizes
@property
def chromnames(self):
""" List of reference sequence names """
return list(self._chromsizes.index)
def offset(self, region):
""" Bin ID containing the left end of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
int
Examples
--------
# >>> c.offset('chr3') # doctest: +SKIP
1311
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_offset(
grp, self._chromids, parse_region(region, self._chromsizes)
)
def extent(self, region):
""" Bin IDs containing the left and right ends of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
2-tuple of ints
Examples
--------
# >>> c.extent('chr3') # doctest: +SKIP
(1311, 2131)
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
@property
def info(self):
""" File information and metadata
Returns
-------
dict
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return info(grp)
@property
def shape(self):
return (self._info["nbins"],) * 2
def chroms(self, **kwargs):
""" Chromosome table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return chroms(grp, lo, hi, fields, **kwargs)
return RangeSelector1D(None, _slice, None, self._info["nchroms"])
def bins(self, **kwargs):
""" Bin table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return bins(grp, lo, hi, fields, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
return RangeSelector1D(None, _slice, _fetch, self._info["nbins"])
def pixels(self, join=False, **kwargs):
""" Pixel table selector
Parameters
----------
join : bool, optional
Whether to expand bin ID columns into chrom, start, and end
columns. Default is ``False``.
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return pixels(grp, lo, hi, fields, join, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
i0, i1 = region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
lo = grp["indexes"]["bin1_offset"][i0]
hi = grp["indexes"]["bin1_offset"][i1]
return lo, hi
return RangeSelector1D(None, _slice, _fetch, self._info["nnz"])
def matrix(
self,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=False,
ignore_index=True,
divisive_weights=None,
max_chunk=500000000,
):
""" Contact matrix selector
Parameters
----------
field : str, optional
Which column of the pixel table to fill the matrix with. By
default, the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing
the desired balancing weights. Set to False to return untransformed
counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID
columns into (chrom, start, end). Has no effect when requesting a
rectangular matrix. Default is True.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the
pixel IDs to improve performance. Default is True.
divisive_weights : bool, optional
Force balancing weights to be interpreted as divisive (True) or
multiplicative (False). Weights are always assumed to be
multiplicative by default unless named KR, VC or SQRT_VC, in which
case they are assumed to be divisive by default.
Returns
-------
Matrix selector
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If
``as_pixels=False``, those missing non-zero elements will
automatically be filled in.
"""
if balance in _4DN_DIVISIVE_WEIGHTS and divisive_weights is None:
divisive_weights = True
def _slice(field, i0, i1, j0, j1):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return matrix(
grp,
i0,
i1,
j0,
j1,
field,
balance,
sparse,
as_pixels,
join,
ignore_index,
divisive_weights,
max_chunk,
self._is_symm_upper,
)
def _fetch(region, region2=None):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
if region2 is None:
region2 = region
region1 = parse_region(region, self._chromsizes)
region2 = parse_region(region2, self._chromsizes)
i0, i1 = region_to_extent(grp, self._chromids, region1)
j0, j1 = region_to_extent(grp, self._chromids, region2)
return i0, i1, j0, j1
return RangeSelector2D(field, _slice, _fetch, (self._info["nbins"],) * 2)
def __repr__(self):
if isinstance(self.store, six.string_types):
filename = os.path.basename(self.store)
container = "{}::{}".format(filename, self.root)
else:
container = repr(self.store)
return '<Cooler "{}">'.format(container)
def _region_to_extent(h5, chrom_ids, region, binsize):
chrom, start, end = region
cid = chrom_ids[chrom]
if binsize is not None:
chrom_offset = h5["indexes"]["chrom_offset"][cid]
yield chrom_offset + int(np.floor(start / binsize))
yield chrom_offset + int(np.ceil(end / binsize))
else:
chrom_lo = h5["indexes"]["chrom_offset"][cid]
chrom_hi = h5["indexes"]["chrom_offset"][cid + 1]
chrom_bins = h5["bins"]["start"][chrom_lo:chrom_hi]
yield chrom_lo + np.searchsorted(chrom_bins, start, "right") - 1
yield chrom_lo + np.searchsorted(chrom_bins, end, "left")
def region_to_offset(h5, chrom_ids, region, binsize=None):
return next(_region_to_extent(h5, chrom_ids, region, binsize))
def region_to_extent(h5, chrom_ids, region, binsize=None):
return tuple(_region_to_extent(h5, chrom_ids, region, binsize))
def get(grp, lo=0, hi=None, fields=None, convert_enum=True, as_dict=False):
"""
Query a range of rows from a table as a dataframe.
A table is an HDF5 group containing equal-length 1D datasets serving as
columns.
Parameters
----------
grp : ``h5py.Group`` or any dict-like of array-likes
Handle to an HDF5 group containing only 1D datasets or any similar
collection of 1D datasets or arrays
lo, hi : int, optional
Range of rows to select from the table.
fields : str or sequence of str, optional
Column or list of columns to query. Defaults to all available columns.
A single string returns a Series instead of a DataFrame.
convert_enum : bool, optional
Whether to convert HDF5 enum datasets into ``pandas.Categorical``
columns instead of plain integer columns. Default is True.
kwargs : optional
Options to pass to ``pandas.DataFrame`` or ``pandas.Series``.
Returns
-------
DataFrame or Series
Notes
-----
HDF5 ASCII datasets are converted to Unicode.
"""
series = False
if fields is None:
fields = list(grp.keys())
elif isinstance(fields, six.string_types):
fields = [fields]
series = True
data = {}
for field in fields:
dset = grp[field]
if convert_enum:
dt = h5py.check_dtype(enum=dset.dtype)
else:
dt = None
if dt is not None:
data[field] = pd.Categorical.from_codes(
dset[lo:hi], sorted(dt, key=dt.__getitem__), ordered=True
)
elif dset.dtype.type == np.string_:
data[field] = dset[lo:hi].astype("U")
else:
data[field] = dset[lo:hi]
if as_dict:
return data
if data and lo is not None:
index = np.arange(lo, lo + len(next(iter(data.values()))))
else:
index = None
if series:
return pd.Series(data[fields[0]], index=index, name=field)
else:
        return pd.DataFrame(data, columns=fields, index=index)
'''Combines oslo bors and yahoo data'''
import numpy as np
import pandas as pd
from pprint import pprint
import scrapeconfig as cng
def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str):
'''
    Takes filenames of CSV files from Oslo Bors and Yahoo Finance and merges them
    into one large dataset.
'''
    df_bors = pd.read_csv(bors_name)
import pandas as pd
from xml.dom import minidom
def getText(nodelist):
"""Helper function to return the text content from an XML node, joined as a single string.
"""
rc = []
for node in nodelist:
if node.nodeType == minidom.Node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def process_tuv(tuv):
"""Function to process a single TMX 'TUV' unit - a unit of text in a particular language.
Args:
tuv (Node): The <tuv> node to process.
Returns:
lang (String): The locale/language code of the <tuv> element.
txt (String): The text contained in the <tuv> element.
"""
if 'lang' in tuv.attributes:
lang = tuv.attributes['lang'].value
else:
lang = tuv.attributes['xml:lang'].value
seg = tuv.getElementsByTagName('seg')[0]
# If the node has direct text content data, process it as a string
if hasattr(seg.childNodes[0], 'data'):
txt = seg.childNodes[0].data
# If it doesn't have a 'data' attribute, it most likely contains child tags such as placeholders (<ph>). Therefore, include these as XML strings.
else:
        if len(seg.childNodes) > 0:
            txt = getText(seg.childNodes)
        else:
            print("no child nodes")
            txt = ''  # fall back to an empty string so the return below cannot fail
    return lang, txt
def read(path):
"""Read function takes in a path to TMX translation file and outputs the metadata and a pandas dataframe.
Args:
param1 (str): The path to the TMX translation file
Returns:
dict: The header of the TMX file, which contains metadata
DataFrame: A Pandas Dataframe. The column names will be the locale/language codes, and the row content will be the translations for each locale.
"""
# parse an xml file by name
tmx = minidom.parse(path)
# Get metadata
metadata = {}
header = tmx.getElementsByTagName('header')[0]
for key in header.attributes.keys():
metadata[key] = header.attributes[key].value
srclang = metadata['srclang']
# Get translation sentences
body = tmx.getElementsByTagName('body')[0]
translation_units = body.getElementsByTagName('tu')
items = []
for tu in translation_units:
tuvs = tu.getElementsByTagName('tuv')
tudata = {}
for tuv in tuvs:
lang, sentence = process_tuv(tuv)
tudata[lang] = sentence
items.append(tudata)
    df = pd.DataFrame(items)
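    # The snippet ends here; per the docstring the function returns the header
    # metadata together with the DataFrame (a hedged completion).
    return metadata, df

# A short usage sketch; the file path below is hypothetical.
if __name__ == "__main__":
    header, translations = read("translations.tmx")
    print(header.get("srclang"))
    print(translations.head())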
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 13:59:31 2020
@author: bernifoellmer
"""
import sys, os
import pandas as pd
import openpyxl
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import colors
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
from glob import glob
from shutil import copyfile
from cta import update_table
#from discharge_extract import extract_specific_tags_df
from discharge_ncs import discharge_ncs
import numpy as np
from collections import defaultdict
from ActiveLearner import ActiveLearner, DISCHARGEFilter
#from featureSelection import featureSelection
from openpyxl.utils import get_column_letter
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
from CTDataStruct import CTPatient
import keyboard
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from numpy.random import shuffle
from openpyxl.styles.differential import DifferentialStyle
from openpyxl import Workbook
from openpyxl.styles import Color, PatternFill, Font, Border
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting.rule import ColorScaleRule, CellIsRule, FormulaRule
from openpyxl.formatting import Rule
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def computeCTA(settings):
print('computeCTA')
#folderpath_master = 'H:/cloud/cloud_data/Projects/CACSFilter/data/discharge_master/discharge_master_01042020'
#date = folderpath_master.split('_')[-1]
#folderpath_components = os.path.join(folderpath_master, 'discharge_components_' + date)
#folderpath_sources = os.path.join(folderpath_master, 'discharge_sources_' + date)
#filepath_master = os.path.join(folderpath_master, 'discharge_master_' + date + '.xlsx')
#filepath_data = os.path.join(folderpath_components, 'discharge_data_' + date + '.xlsx')
filepath_dicom = settings['filepath_dicom']
filepath_master = settings['filepath_data']
filepath_ITT = settings['filepath_ITT']
filepath_phase_exclude_stenosis = settings['filepath_phase_exclude_stenosis']
filepath_stenosis_bigger_20_phases = settings['filepath_stenosis_bigger_20_phases']
filepath_prct = settings['filepath_prct']
filepath_ecrf = settings['filepath_ecrf']
#filepath_master = 'H:/cloud/cloud_data/Projects/CACSFilter/data/discharge_master/discharge_master_01042020/discharge_master_01042020.xlsx'
df_discharge = pd.read_excel(filepath_dicom)
df_master = pd.read_pickle(filepath_master)
df_ITT = pd.read_excel(filepath_ITT)
df_phase_exclude_stenosis = pd.read_excel(filepath_phase_exclude_stenosis)
df_stenosis_bigger_20_phase = pd.read_excel(filepath_stenosis_bigger_20_phases)
df_prct = pd.read_excel(filepath_prct)
df_ecrf = pd.read_excel(filepath_ecrf)
# 34
df_ecrf_tmp = df_ecrf[df_ecrf['1. Date of CT scan'].notna()]
df_ITT_tmp = df_ITT[~(df_ITT['ITT']==2)]
ct_ecrf = pd.merge(df_ecrf_tmp, df_ITT_tmp, left_on='Patient identifier', right_on='ID')[['Patient identifier', '1. Date of CT scan']].drop_duplicates('Patient identifier')
ct_ecrf = ct_ecrf.rename(columns={'1. Date of CT scan': 'EcrfDate'})
# 37
df_phase_exclude_stenosis_tmp = df_phase_exclude_stenosis[df_phase_exclude_stenosis['phase_i0011'].notna()]
phase_no_stenosis = df_phase_exclude_stenosis_tmp[['mnpaid', 'phase_i0011']]
phase_no_stenosis = phase_no_stenosis.rename(columns={'mnpaid': 'PatientID', 'phase_i0011': 'phase'})
phase_no_stenosis['arteries'] = 'LAD,RCA,LCX,LMA'
#40
# create view phase_with_stenosis1 as select mnpaid as PatientID, `sten_i0231 (Phase #1)` as phase, concat_ws(',',if(LAD=1,'LAD', null),if(RCA=1,'RCA',null),if(LMA=1,'LMA',null),if(LCX=1,'LCX',null)) as arteries from stenosis_bigger_20_phases where `sten_i0231 (Phase #1)` is not null;
df_stenosis_bigger_20_phase_tmp = df_stenosis_bigger_20_phase[df_stenosis_bigger_20_phase['sten_i0231 (Phase #1)'].notna()]
df_stenosis_bigger_20_phase_tmp = df_stenosis_bigger_20_phase_tmp.reset_index(drop=True)
phase_with_stenosis1 = df_stenosis_bigger_20_phase_tmp[['mnpaid', 'sten_i0231 (Phase #1)']]
arteries_tmp=pd.DataFrame('', index=np.arange(len(phase_with_stenosis1)), columns=['arteries'])
for index, row in arteries_tmp.iterrows():
s=''
if df_stenosis_bigger_20_phase_tmp.loc[index,'LAD']==1:
s = s + ',LAD'
if df_stenosis_bigger_20_phase_tmp.loc[index,'RCA']==1:
s = s + ',RCA'
if df_stenosis_bigger_20_phase_tmp.loc[index,'LMA']==1:
s = s + ',LMA'
if df_stenosis_bigger_20_phase_tmp.loc[index,'LCX']==1:
s = s + ',LCX'
if len(s)==0:
arteries_tmp.loc[index,'arteries'] = np.nan
else:
arteries_tmp.loc[index,'arteries'] = s[1:]
phase_with_stenosis1['arteries'] = arteries_tmp
phase_with_stenosis1 = phase_with_stenosis1.rename(columns={'mnpaid': 'PatientID', 'sten_i0231 (Phase #1)': 'phase'})
# 41
df_stenosis_bigger_20_phase_tmp = df_stenosis_bigger_20_phase[df_stenosis_bigger_20_phase['sten_i0241'].notna()]
df_stenosis_bigger_20_phase_tmp = df_stenosis_bigger_20_phase_tmp.reset_index(drop=True)
phase_with_stenosis2 = df_stenosis_bigger_20_phase_tmp[['mnpaid', 'sten_i0241']]
arteries_tmp=pd.DataFrame('', index=np.arange(len(phase_with_stenosis2)), columns=['arteries'])
for index, row in arteries_tmp.iterrows():
s=''
if df_stenosis_bigger_20_phase_tmp.loc[index,'LAD']==1:
s = s + ',LAD'
if df_stenosis_bigger_20_phase_tmp.loc[index,'RCA']==1:
s = s + ',RCA'
if df_stenosis_bigger_20_phase_tmp.loc[index,'LMA']==1:
s = s + ',LMA'
if df_stenosis_bigger_20_phase_tmp.loc[index,'LCX']==1:
s = s + ',LCX'
if len(s)==0:
arteries_tmp.loc[index,'arteries'] = np.nan
else:
arteries_tmp.loc[index,'arteries'] = s[1:]
phase_with_stenosis2['arteries'] = arteries_tmp
phase_with_stenosis2 = phase_with_stenosis2.rename(columns={'mnpaid': 'PatientID', 'sten_i0241': 'phase'})
# 42
df_stenosis_bigger_20_phase_tmp = df_stenosis_bigger_20_phase[df_stenosis_bigger_20_phase['sten_i0251'].notna()]
df_stenosis_bigger_20_phase_tmp = df_stenosis_bigger_20_phase_tmp.reset_index(drop=True)
phase_with_stenosis3 = df_stenosis_bigger_20_phase_tmp[['mnpaid', 'sten_i0251']]
arteries_tmp=pd.DataFrame('', index=np.arange(len(phase_with_stenosis3)), columns=['arteries'])
for index, row in arteries_tmp.iterrows():
s=''
if df_stenosis_bigger_20_phase_tmp.loc[index,'LAD']==1:
s = s + ',LAD'
if df_stenosis_bigger_20_phase_tmp.loc[index,'RCA']==1:
s = s + ',RCA'
if df_stenosis_bigger_20_phase_tmp.loc[index,'LMA']==1:
s = s + ',LMA'
if df_stenosis_bigger_20_phase_tmp.loc[index,'LCX']==1:
s = s + ',LCX'
if len(s)==0:
arteries_tmp.loc[index,'arteries'] = np.nan
else:
arteries_tmp.loc[index,'arteries'] = s[1:]
phase_with_stenosis3['arteries'] = arteries_tmp
phase_with_stenosis3 = phase_with_stenosis3.rename(columns={'mnpaid': 'PatientID', 'sten_i0251': 'phase'})
print('computeCTA01')
# 43
# create view phase_information as select * from phase_no_stenosis union select * from phase_with_stenosis1 union select * from phase_with_stenosis2 union select * from phase_with_stenosis3;
phase_information = pd.concat([phase_no_stenosis, phase_with_stenosis1, phase_with_stenosis2, phase_with_stenosis3], axis=0).drop_duplicates()
# 47
# create view rca_double as select PatientID, group_concat(distinct phase), count(distinct phase) from phase_information where instr(arteries,'RCA') group by PatientID having count(distinct phase)>1;
phase_information_tmp = phase_information.replace(to_replace=[np.nan], value='', inplace=False)
phase_information_tmp = phase_information_tmp[phase_information_tmp['arteries'].str.contains('RCA')]
rca_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = phase_information_tmp['PatientID'].unique()
# Error for 29-TUR-0005 (LAD,RCA,LCX), (RCA) ???
for pat in patients:
df_pat = phase_information_tmp[phase_information_tmp['PatientID']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + str(int(row['phase']))
if len(s)>0:
s = s[1:]
count=df_pat['arteries'].value_counts().max()
if count>1:
rca_double = rca_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
# 48
phase_information_tmp = phase_information.replace(to_replace=[np.nan], value='', inplace=False)
phase_information_tmp = phase_information_tmp[phase_information_tmp['arteries'].str.contains('LMA')]
lma_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = phase_information_tmp['PatientID'].unique()
# Error for 29-TUR-0005 (LAD,RCA,LCX), (RCA) ???
for pat in patients:
df_pat = phase_information_tmp[phase_information_tmp['PatientID']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + str(int(row['phase']))
if len(s)>0:
s = s[1:]
count=df_pat['arteries'].value_counts().max()
if count>1:
lma_double = lma_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
# 49
phase_information_tmp = phase_information.replace(to_replace=[np.nan], value='', inplace=False)
phase_information_tmp = phase_information_tmp[phase_information_tmp['arteries'].str.contains('LAD')]
lad_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = phase_information_tmp['PatientID'].unique()
# Error for 29-TUR-0005 (LAD,RCA,LCX), (RCA) ???
for pat in patients:
df_pat = phase_information_tmp[phase_information_tmp['PatientID']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + str(int(row['phase']))
if len(s)>0:
s = s[1:]
count=df_pat['arteries'].value_counts().max()
if count>1:
lad_double = lad_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
# 50
phase_information_tmp = phase_information.replace(to_replace=[np.nan], value='', inplace=False)
phase_information_tmp = phase_information_tmp[phase_information_tmp['arteries'].str.contains('LCX')]
lcx_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = phase_information_tmp['PatientID'].unique()
# Error for 29-TUR-0005 (LAD,RCA,LCX), (RCA) ???
for pat in patients:
df_pat = phase_information_tmp[phase_information_tmp['PatientID']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + str(int(row['phase']))
if len(s)>0:
s = s[1:]
count=df_pat['arteries'].value_counts().max()
if count>1:
lcx_double = lcx_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
print('computeCTA02')
# 51
# create view phase_double as select *, 'LAD' as vessel from lad_double union select *, 'RCA' from rca_double union select *, 'LMA' from lma_double union select *, 'LCX' from lcx_double order by PatientID;
phase_double = pd.concat([rca_double, lma_double, lad_double, lcx_double], axis=0).copy()
phase_double= phase_double.reset_index(drop=True)
vessel = pd.Series(['RCA' for i in range(len(rca_double))] + ['LMA' for i in range(len(lma_double))] + ['LAD' for i in range(len(lad_double))] +['LCX' for i in range(len(lcx_double))])
phase_double['vessel'] = vessel
phase_double = phase_double.sort_values('PatientID')
# 54
# create view phase_oka as select * from phase_information where PatientID not in (select distinct PatientID from phase_double) order by PatientID;
phase_information = phase_information.reset_index(drop=True)
phase_double = phase_double.reset_index(drop=True)
phase_oka = pd.DataFrame(columns=phase_double.columns)
patients = list(phase_double['PatientID'])
for index, row in phase_information.iterrows():
if not row['PatientID'] in patients:
phase_oka = phase_oka.append(row)
phase_oka = phase_oka[['PatientID', 'phase', 'arteries']]
# 56
phase_ok = phase_oka.copy()
# 59
df_prct_tmp = df_prct[df_prct['other_best_phase'].notna()]
prct_phase_other = df_prct_tmp[['PatientId', 'other_best_phase']]
prct_phase_other['arteries'] = 'RCA, LAD, LCX'
prct_phase_other = prct_phase_other.rename(columns={'other_best_phase': 'phase'})
# 60
df_prct_tmp = df_prct[df_prct['rca_best_phase'].notna()]
prct_phase_rca = df_prct_tmp[['PatientId', 'rca_best_phase']]
prct_phase_rca['arteries'] = 'RCA'
prct_phase_rca = prct_phase_rca.rename(columns={'rca_best_phase': 'phase'})
# 61
df_prct_tmp = df_prct[df_prct['lad_best_phase'].notna()]
prct_phase_lad = df_prct_tmp[['PatientId', 'lad_best_phase']]
prct_phase_lad['arteries'] = 'LAD'
prct_phase_lad = prct_phase_lad.rename(columns={'lad_best_phase': 'phase'})
# 62
df_prct_tmp = df_prct[df_prct['lcx_best_phase'].notna()]
prct_phase_lcx = df_prct_tmp[['PatientId', 'lcx_best_phase']]
prct_phase_lcx['arteries'] = 'LCX'
prct_phase_lcx = prct_phase_lcx.rename(columns={'lcx_best_phase': 'phase'})
#63
#prct_phases = pd.concat([prct_phase_other, prct_phase_rca, prct_phase_lad, prct_phase_lcx], axis=0).drop_duplicates()
# Replaced to filter phase which ar strings (comments)
prct_phases_tmp = pd.concat([prct_phase_other, prct_phase_rca, prct_phase_lad, prct_phase_lcx], axis=0).drop_duplicates()
prct_phases = pd.DataFrame(columns=prct_phases_tmp.columns)
for index, row in prct_phases_tmp.iterrows():
if isfloat(row['phase']):
prct_phases = prct_phases.append(row)
#66
#create view rca_double as select PatientID, group_concat(distinct phase), count(distinct phase) from phase_information where instr(arteries,'RCA') group by PatientID having count(distinct phase)>1;
#create view prct_rca_double as select PatientId, group_concat(distinct phase), count(distinct phase) from prct_phases where instr(arteries, 'RCA') group by PatientId having count(distinct phase)>1;
prct_phases_tmp = prct_phases.replace(to_replace=[np.nan], value='', inplace=False)
prct_phases_tmp = prct_phases_tmp[prct_phases_tmp['arteries'].str.contains('RCA')]
prct_rca_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = prct_phases_tmp['PatientId'].unique()
for pat in patients:
if len(pat)>0:
df_pat = prct_phases_tmp[prct_phases_tmp['PatientId']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + row['phase']
if len(s)>0:
s = s[1:]
#count=df_pat['phase'].value_counts().max()
#count=len(df_pat)
#count=df_pat['arteries'].value_counts().max()
count=len(df_pat['phase'].unique())
if count>1:
prct_rca_double = prct_rca_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
else:
prct_rca_double = prct_rca_double.append({'PatientID':pat, 'group_concat(distinct phase)':'', 'count(distinct phase)':2}, ignore_index=True)
# 67
prct_phases_tmp = prct_phases.replace(to_replace=[np.nan], value='', inplace=False)
prct_phases_tmp = prct_phases_tmp[prct_phases_tmp['arteries'].str.contains('LAD')]
prct_lad_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = prct_phases_tmp['PatientId'].unique()
for pat in patients:
if len(pat)>0:
df_pat = prct_phases_tmp[prct_phases_tmp['PatientId']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + row['phase']
if len(s)>0:
s = s[1:]
count=len(df_pat['phase'].unique())
if count>1:
prct_lad_double = prct_lad_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
else:
prct_lad_double = prct_lad_double.append({'PatientID':pat, 'group_concat(distinct phase)':'', 'count(distinct phase)':2}, ignore_index=True)
# 68
prct_phases_tmp = prct_phases.replace(to_replace=[np.nan], value='', inplace=False)
prct_phases_tmp = prct_phases_tmp[prct_phases_tmp['arteries'].str.contains('LCX')]
prct_lcx_double=pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
patients = prct_phases_tmp['PatientId'].unique()
for pat in patients:
if len(pat)>0:
df_pat = prct_phases_tmp[prct_phases_tmp['PatientId']==pat]
s=''
for index, row in df_pat.iterrows():
s = s + ',' + row['phase']
if len(s)>0:
s = s[1:]
count=len(df_pat['phase'].unique())
if count>1:
prct_lcx_double = prct_lcx_double.append({'PatientID':pat, 'group_concat(distinct phase)':s, 'count(distinct phase)':count}, ignore_index=True)
else:
prct_lcx_double = prct_lcx_double.append({'PatientID':pat, 'group_concat(distinct phase)':'', 'count(distinct phase)':2}, ignore_index=True)
# 69
    prct_phase_double = pd.concat([prct_rca_double, prct_lad_double, prct_lcx_double], axis=0)
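    # The four per-vessel blocks above repeat the same aggregation; a hedged
    # refactoring sketch (helper name and signature are assumptions, not part
    # of the original pipeline):
    def collect_double_phases(phase_df, id_col, vessel):
        # Collect patients that have more than one distinct phase for a vessel.
        doubles = pd.DataFrame(columns=['PatientID', 'group_concat(distinct phase)', 'count(distinct phase)'])
        subset = phase_df.replace(to_replace=[np.nan], value='', inplace=False)
        subset = subset[subset['arteries'].str.contains(vessel)]
        for pat in subset[id_col].unique():
            df_pat = subset[subset[id_col] == pat]
            phases = ','.join(str(p) for p in df_pat['phase'])
            count = len(df_pat['phase'].unique())
            if count > 1:
                doubles = doubles.append({'PatientID': pat,
                                          'group_concat(distinct phase)': phases,
                                          'count(distinct phase)': count}, ignore_index=True)
        return doubles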
import requests
import configparser
import pathlib
import pandas as pd
import json
import csv
import glob
from ratelimit import limits, sleep_and_retry
config = configparser.ConfigParser()
config.read("config.cfg")
accesskey = config["alphavantage"]["accesskey"]
class InvalidInputError(Exception):
pass
class ApiCallFrequencyExceeded(Exception):
pass
@sleep_and_retry
@limits(calls=3, period=60) # TODO parameterize
def search(search_word):
host = "https://www.alphavantage.co"
url = (
f"{host}/query?function=SYMBOL_SEARCH&keywords={search_word}&apikey={accesskey}"
)
r = requests.get(url)
data = r.json()
search_results = [
(res.get("1. symbol"), res.get("2. name")) for res in data["bestMatches"]
]
return search_results
def fetch_time_series_intraday_extended(symbol, interval, num_months, sleep=60):
allowed_intervals = ("1min", "5min", "15min", "30min", "60min")
host = "https://www.alphavantage.co"
if interval not in allowed_intervals:
raise InvalidInputError(f"{interval} not an allowed value for interval.")
if num_months < 1 or num_months > 24:
raise InvalidInputError(
f"{str(num_months)} is out of range. num_months must be between 1 and 24"
)
allowed_slices = [f"year{y}month{m}" for y in [1, 2] for m in range(1, 13)]
months = allowed_slices[slice(0, int(num_months))]
output_path = pathlib.Path(f"Data/{interval}/{symbol}")
output_path.mkdir(parents=True, exist_ok=True)
@sleep_and_retry
@limits(calls=3, period=60) # TODO parameterize this
def do_download(s, url):
print(f"Doing download: {url}")
download = s.get(url)
decoded_content = download.content.decode("utf-8")
data = list(csv.reader(decoded_content.splitlines(), delimiter=","))
if (
"Thank you for using Alpha Vantage! Our standard API call frequency is 5 calls"
in download.text
):
raise ApiCallFrequencyExceeded(download.text)
return data
with requests.Session() as s:
for month in months:
url = f"{host}/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol={symbol}&interval={interval}&slice={month}&apikey={accesskey}"
data = do_download(s, url)
df = pd.DataFrame(data[1:], columns=data[0])
df["time"] = pd.to_datetime(df["time"])
df["open"] = | pd.to_numeric(df["open"]) | pandas.to_numeric |
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the implementation of ``DAGBase``.
``DAGBase`` is a class which provides an interface and common functionality for sklearn-style NOTEARS wrappers.
"""
import copy
import warnings
from abc import ABCMeta
from typing import Dict, Iterable, List, Tuple, Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted, check_X_y
from causalnex.plots import (
EDGE_STYLE,
NODE_STYLE,
display_plot_ipython,
display_plot_mpl,
plot_structure,
)
from causalnex.plots.display import Axes, Figure, Image
from causalnex.structure.pytorch import notears
class DAGBase(
BaseEstimator, metaclass=ABCMeta
): # pylint: disable=too-many-instance-attributes
"""
Base class for all sklearn wrappers of the StructureModel.
Implements the sklearn .fit and .predict interface.
"""
# pylint: disable=too-many-arguments
def __init__(
self,
dist_type_schema: Dict[Union[str, int], str] = None,
alpha: float = 0.0,
beta: float = 0.0,
fit_intercept: bool = True,
hidden_layer_units: Iterable[int] = None,
threshold: float = 0.0,
tabu_edges: List = None,
tabu_parent_nodes: List = None,
tabu_child_nodes: List = None,
dependent_target: bool = True,
enforce_dag: bool = False,
standardize: bool = False,
target_dist_type: str = None,
notears_mlp_kwargs: Dict = None,
):
"""
Args:
dist_type_schema: The dist type schema corresponding to the X data passed to fit or predict.
It maps the pandas column name in X to the string alias of a dist type.
If X is a np.ndarray, it maps the positional index to the string alias of a dist type.
A list of alias names can be found in ``dist_type/__init__.py``.
If None, assumes that all data in X is continuous.
alpha: l1 loss weighting. When using nonlinear layers this is only applied
to the first layer.
            beta: l2 loss weighting. Applied across all layers. Recommended to use this
when fitting nonlinearities.
fit_intercept: Whether to fit an intercept in the structure model
equation. Use this if variables are offset.
hidden_layer_units: An iterable where its length determine the number of layers used,
and the numbers determine the number of nodes used for the layer in order.
threshold: The thresholding to apply to the DAG weights.
If 0.0, does not apply any threshold.
tabu_edges: Tabu edges passed directly to the NOTEARS algorithm.
tabu_parent_nodes: Tabu nodes passed directly to the NOTEARS algorithm.
tabu_child_nodes: Tabu nodes passed directly to the NOTEARS algorithm.
dependent_target: If True, constrains NOTEARS so that y can only
be dependent (i.e. cannot have children) and imputes from parent nodes.
enforce_dag: If True, thresholds the graph until it is a DAG.
NOTE a properly trained model should be a DAG, and failure
indicates other issues. Use of this is only recommended if
features have similar units, otherwise comparing edge weight
magnitude has limited meaning.
standardize: Whether to standardize the X and y variables before fitting.
The L-BFGS algorithm used to fit the underlying NOTEARS works best on data
                all of the same scale so this parameter is recommended.
notears_mlp_kwargs: Additional arguments for the NOTEARS MLP model.
target_dist_type: The distribution type of the target.
Uses the same aliases as dist_type_schema.
Raises:
TypeError: if alpha is not numeric.
TypeError: if beta is not numeric.
TypeError: if fit_intercept is not a bool.
TypeError: if threshold is not numeric.
NotImplementedError: if target_dist_type not in supported_types
"""
if not isinstance(alpha, (int, float)):
raise TypeError("alpha should be numeric")
if not isinstance(beta, (int, float)):
raise TypeError("beta should be numeric")
if not isinstance(fit_intercept, bool):
raise TypeError("fit_intercept should be a bool")
if not isinstance(threshold, (int, float)):
raise TypeError("threshold should be numeric")
# supported types is a class attr in child class
self._supported_types: str
# defensive check
if (target_dist_type not in self._supported_types) and (
target_dist_type is not None
):
raise NotImplementedError(
f"Currently only implements [{', '.join(self._supported_types)}] dist types."
f" Got: {target_dist_type}"
)
# core causalnex parameters
self.alpha = alpha
self.beta = beta
self.fit_intercept = fit_intercept
self.hidden_layer_units = hidden_layer_units
self.dist_type_schema = dist_type_schema
self.threshold = threshold
self.tabu_edges = tabu_edges
self.tabu_parent_nodes = tabu_parent_nodes
self.tabu_child_nodes = tabu_child_nodes
self.target_dist_type = target_dist_type
self.notears_mlp_kwargs = notears_mlp_kwargs
# sklearn wrapper paramters
self.dependent_target = dependent_target
self.enforce_dag = enforce_dag
self.standardize = standardize
def fit(self, X: Union[pd.DataFrame, np.ndarray], y: Union[pd.Series, np.ndarray]):
"""
Fits the sm model using the concat of X and y.
"""
# defensive X, y checks
check_X_y(X, y, y_numeric=True)
# force X, y to DataFrame, Series for later calculations
X = pd.DataFrame(X)
y = pd.Series(y)
# force name so that name != None (causes errors in notears)
y.name = y.name or "__target"
# if self.dist_type_schema is None, assume all columns are continuous
# NOTE: this is copied due to later insertions
dist_type_schema = copy.deepcopy(self.dist_type_schema) or {
col: "cont" for col in X.columns
}
if self.standardize:
# only standardize the continuous dist type columns.
self.continuous_col_idxs = [
X.columns.get_loc(col)
for col, alias in dist_type_schema.items()
if alias == "cont"
]
            # copy X to prevent changes to underlying array data
X = X.copy()
self._ss_X = StandardScaler()
X.iloc[:, self.continuous_col_idxs] = self._ss_X.fit_transform(
X.iloc[:, self.continuous_col_idxs]
)
# if its a continuous target also standardize
if self.target_dist_type == "cont":
y = y.copy()
self._ss_y = StandardScaler()
y[:] = self._ss_y.fit_transform(y.values.reshape(-1, 1)).reshape(-1)
# add the target to the dist_type_schema
# NOTE: this must be done AFTER standardize
dist_type_schema[y.name] = self.target_dist_type
# preserve the feature and target colnames
self._features = tuple(X.columns)
self._target = y.name
# concat X and y along column axis
        X = pd.concat([X, y], axis=1)
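# A minimal usage sketch for the sklearn-style wrappers built on DAGBase,
# assuming the concrete DAGRegressor subclass shipped with causalnex is
# available; the synthetic data below is illustrative only.
if __name__ == "__main__":
    from causalnex.structure.pytorch import DAGRegressor

    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(200, 3))
    y_demo = 2.0 * X_demo[:, 0] + rng.normal(scale=0.1, size=200)
    reg = DAGRegressor(alpha=0.1, standardize=True)
    reg.fit(X_demo, y_demo)
    print(reg.predict(X_demo)[:5])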
import pandas as pd
import numpy as np
from random import sample
from xgboost import XGBRegressor
from random import choices,seed
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats import t
import os
os.chdir("c://users/jliv/downloads/")
dat=pd.read_csv("auto-mpg.data",header=None)
"""
1. mpg: continuous
2. cylinders: multi-valued discrete
3. displacement: continuous
4. horsepower: continuous
5. weight: continuous
6. acceleration: continuous
7. model year: multi-valued discrete
8. origin: multi-valued discrete
9. car name: string (unique for each instance)
"""
pd.set_option("display.max_columns",19)
df=pd.DataFrame(dat[0].str.split(expand=True))
df=df[[0,1,2,3,4,5,6,7]].copy()
columns = ['mpg','cyl','disp','hp','weight','acc','yr','origin']
df.columns = columns
df.replace("?",np.nan,inplace=True)
for i in df.columns:
df[i]=df[i].astype(float)
for i in columns:
print(i,len(df[df[i].isna()]))
df.dropna(inplace=True)
seed(42)
train=sample(list(df.index),int(len(df.index)*.8))
train.sort()
test=[i for i in df.index if i not in train]
kpi='mpg'
feats=['cyl', 'disp', 'hp', 'weight', 'acc', 'yr', 'origin']
X=df[df.index.isin(train)][feats].copy()
Y=df[df.index.isin(train)][kpi]
xtest=df[df.index.isin(test)][feats].copy()
ytest=df[df.index.isin(test)][kpi]
means=np.mean(X)
stds=np.std(X)
X=(X-means)/stds
xtest=(xtest-means)/stds
corrdf=X.copy()
corrdf[kpi]=Y
corrdf.corr()[kpi]
corrdf.corr()
seed(42)
fold = pd.Series(choices(range(1,9),k=len(X)),index=X.index)
class mixed_model:
def __init__(self,mod,lr,epoch,optimization):
self.lr=lr
self.epoch=epoch
self.mod=mod
self.optimization=optimization
def fit(self,x,y,linear_feats):
self.x=x
self.y=y
self.linear_feats=linear_feats
self.other_feats=[i for i in self.x.columns if i not in self.linear_feats]
#self.coefs_=np.random.normal(0,.5,len(self.linear_feats))
self.coefs_=np.zeros(len(self.linear_feats))
self.rmse_ = []
self.coefs_per_epoch=[]
for e in range(0,self.epoch):
self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coefs_)
resid = (self.y-self.x[self.linear_feats]@self.coefs_-self.mod.predict(self.x[self.other_feats]))
            grad=resid@self.x[self.linear_feats]
self.rmse_.append(np.mean(resid**2)**.5)
if self.optimization =='Newtonian':
                H = np.linalg.pinv(self.x[self.linear_feats].T@self.x[self.linear_feats])
term = grad@H
else:
term = grad
            self.coefs_=self.coefs_+self.lr*term
self.coefs_per_epoch.append(list(self.coefs_))
self.epochs_completed_=e
self.converged_ = []
#if e>=80:
if e >= self.epoch*.1:
"""
                Must run at least 10% of epochs before convergence is checked.
Stopping Criteria:
T-test of sample means for parameter estimates and model loss with:
X1: Third quarter of parameter chain
X2: Fourth quarter of parameter chain
If all parameters and loss achieve convergence with 95% confidence:
Break
If the final Epoch is reached without some parameters or loss converging:
Deliver warning to increase epoch parameter
"""
for i in range(len(self.linear_feats)):
parameter_chain=np.array(self.coefs_per_epoch)[:,i]
X1= parameter_chain[int(e*.5):int(e*.75)]
X2= parameter_chain[int(e*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
if absT<=t.ppf(1-.05/2, v):
self.converged_.append(1)
else:
self.converged_.append(0)
parameter_chain=self.rmse_
X1= parameter_chain[int(e*.5):int(e*.75)]
X2= parameter_chain[int(e*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
if absT<=t.ppf(1-.05/2, v):
self.converged_.append(1)
else:
self.converged_.append(0)
"""
if absT<=t.ppf(1-.05/2, v):
if np.mean(self.converged_)!=1:
print("Warning: Some parameters may not have converged, perhaps increase epochs.")
break"""
#If all parameters converged, break; if last epoch is reached without convergence, produce warning
if np.mean(self.converged_)==1:
break
elif (np.mean(self.converged_)!=1)&(e==self.epoch-1):
print("Warning: Some parameters or Loss may not have converged, perhaps increase epochs.")
self.coef_means=pd.Series(np.mean(np.array(self.coefs_per_epoch)[int(self.epochs_completed_*.5):,],axis=0),index=self.linear_feats)
self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coef_means)
def predict(self,x):
#self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coef_means)
pred=x[self.linear_feats]@self.coef_means+self.mod.predict(x[self.other_feats])
return pred
def predict_last_coefs(self,x):
#self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coefs_)
pred=x[self.linear_feats]@self.coefs_+self.mod.predict(x[self.other_feats])
return pred
X['int']=1
xtest['int']=1
rmse_PR=[]
rmse_REG=[]
rmse_XGB=[]
r2_PR=[]
r2_REG=[]
r2_XGB=[]
for f in range(1,9):
xt=X[fold!=f].copy()
yt=Y[fold!=f]
xv=X[fold==f].copy()
yv=Y[fold==f]
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=.1,
epoch=500,
optimization='Gradient'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'int']
)
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
rmse_PR.append(rmse)
r2_PR.append(r2)
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
##Regression
coef=np.linalg.pinv(xt.T@xt)@(xt.T@yt)
yfit_regression=xt@coef
ypred_regression=xv@coef
coef_df=pd.DataFrame(
{'feat':xt.columns,
'coef':coef}
)
r2_regression=1-sum((yv-ypred_regression)**2)/sum((yv-np.mean(yv))**2)
rmse_regression=np.mean((yv-ypred_regression)**2)**.5
rmse_REG.append(rmse_regression)
r2_REG.append(r2_regression)
##XGB
xgb = mod.mod.fit(xt,yt)
ypred_xgb=pd.Series(xgb.predict(xv),index=yv.index)
r2_xgb=1-sum((yv-ypred_xgb)**2)/sum((yv-np.mean(yv))**2)
rmse_xgb=np.mean((yv-ypred_xgb)**2)**.5
rmse_XGB.append(rmse_xgb)
r2_XGB.append(r2_xgb)
cv_out=pd.DataFrame({'fold':range(1,9)})
cv_out['rmse_PR']=rmse_PR
cv_out['rmse_REG']=rmse_REG
cv_out['rmse_XGB']=rmse_XGB
cv_out['r2_PR']=r2_PR
cv_out['r2_REG']=r2_REG
cv_out['r2_XGB']=r2_XGB
print(np.round(np.mean(cv_out,axis=0),3))
#TEST
kpi='mpg'
feats=['cyl', 'disp', 'hp', 'weight', 'acc', 'yr', 'origin']
xt=X.copy()
yt=Y.copy()
xv=xtest.copy()
yv=ytest.copy()
"""
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=10,
epoch=2800,
optimization='Newtonian'
)
"""
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=.1,
epoch=1500,
optimization='Gradient'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'int']
)
#ypred=mod.predict_last_coefs(xv)
mod.coefs_
mod.converged_
mod.coefs_per_epoch
mod.epochs_completed_
#pd.DataFrame(round(mod.coef_means,2),columns=['Coefficient']).to_csv("downloads/pr_coef.csv")
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
for i in range(len(mod.linear_feats)):
plt.plot(np.array(mod.coefs_per_epoch)[:,i])
plt.title("Coefficient of "+mod.linear_feats[i])
plt.xlabel("Epoch")
plt.show()
plt.plot(mod.rmse_)
plt.title("Training RMSE")
plt.xlabel("Epoch")
plt.show()
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
ax1.plot(np.array(mod.coefs_per_epoch)[:,0])
ax1.set_title(mod.linear_feats[0])
ax1.set_xticklabels([])
ax2.plot(np.array(mod.coefs_per_epoch)[:,1])
ax2.set_title(mod.linear_feats[1])
ax2.set_xticklabels([])
ax3.plot(np.array(mod.coefs_per_epoch)[:,2])
ax3.set_title(mod.linear_feats[2])
ax3.set_xlabel("Epoch")
ax4.plot(mod.rmse_)
ax4.set_title("RMSE")
ax4.set_xlabel("Epoch")
plt.show()
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
converged = int(mod.epochs_completed_*.5)
ax1.hist(np.array(mod.coefs_per_epoch)[converged:,0])
ax1.set_title(mod.linear_feats[0])
ax2.hist(np.array(mod.coefs_per_epoch)[converged:,1])
ax2.set_title(mod.linear_feats[1])
ax3.hist(np.array(mod.coefs_per_epoch)[converged:,2])
ax3.set_xlabel(mod.linear_feats[2])
ax4.hist(mod.rmse_[converged:])
ax4.set_xlabel("RMSE")
plt.show()
#Testing Parameter Convergence
for i in range(len(mod.linear_feats)):
parameter_chain=np.array(mod.coefs_per_epoch)[:,i]
column=mod.linear_feats[i]
X1= parameter_chain[int(mod.epochs_completed_*.5):int(mod.epochs_completed_*.75)]
X2= parameter_chain[int(mod.epochs_completed_*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
print(column+" Converged: ",~(absT>t.ppf(1-.05/2, v)))
#Testing Parameter Convergence
parameter_chain=mod.rmse_
X1= parameter_chain[int(mod.epochs_completed_*.5):int(mod.epochs_completed_*.75)]
X2= parameter_chain[int(mod.epochs_completed_*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
print("RMSE Converged: ",~(absT>t.ppf(1-.05/2, v)))
##Regression
coef=np.linalg.pinv(xt.T@xt)@(xt.T@yt)
yfit_regression=xt@coef
ypred_regression=xv@coef
coef_df=pd.DataFrame(
{'feat':xt.columns,
'coef':coef}
)
round(coef_df,2).to_csv("coef_df.csv")
r2_regression=1-sum((yv-ypred_regression)**2)/sum((yv-np.mean(yv))**2)
rmse_regression=np.mean((yv-ypred_regression)**2)**.5
print("Regression R2,RSME: ",round(r2_regression,3),round(rmse_regression,2))
##XGB
xgb = mod.mod.fit(xt,yt)
ypred_xgb=pd.Series(xgb.predict(xv),index=yv.index)
r2_xgb=1-sum((yv-ypred_xgb)**2)/sum((yv-np.mean(yv))**2)
rmse_xgb=np.mean((yv-ypred_xgb)**2)**.5
print("XGB R2,RSME: ",round(r2_xgb,3),round(rmse_xgb,2))
"""
Testing high multicollinearity behavior. XGB is robust to this, regression is not.
Two features highly correlated are CYL and DISP.
"""
print('Looking at multicollinearity:')
#Multicollinearity
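# A quick check of the claim above: pairwise correlation between cyl and disp
# in the (standardized) training features. This diagnostic is an addition, not
# part of the original script.
print("corr(cyl, disp):", round(np.corrcoef(xt['cyl'], xt['disp'])[0, 1], 3))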
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=3,
epoch=3000,
optimization='Newtonian'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'cyl','yr',
'int']
)
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
for i in range(len(mod.linear_feats)):
plt.plot(np.array(mod.coefs_per_epoch)[:,i])
plt.title("Coefficient of "+mod.linear_feats[i])
plt.xlabel("Epoch")
plt.show()
plt.plot(mod.rmse_)
plt.title("Training RMSE")
plt.xlabel("Epoch")
plt.show()
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
mm_pr_coefs = pd.DataFrame(mod.coef_means,columns=['Coef'])
round(mm_pr_coefs.merge(round(corrdf.corr()[[kpi]],2),how='left',left_index=True,right_index=True),2).to_csv('multicollinear_pr_coefs.csv')
round(pd.DataFrame(mod.coef_means,columns=['Coef']),2).to_csv('multicollinear_pr_coefs.csv')
round(corrdf.corr()[kpi],2)
corrdf.corr()
##Regression
xt1=xt[['weight','disp','cyl','yr','int']].copy()
xv1=xv[['weight','disp','cyl','yr','int']].copy()
coef=np.linalg.pinv(xt1.T@xt1)@(xt1.T@yt)
yfit_regression=xt1@coef
ypred_regression=xv1@coef
coef_df=pd.DataFrame(
{'feat':xt1.columns,
'coef':coef}
)
coef_df.set_index('feat',inplace=True)
round(coef_df.merge(round(corrdf.corr()[[kpi]],2),how='left',left_index=True,right_index=True),2).to_csv('collinear_coefs_reg.csv')
r2_regression=1-sum((yv-ypred_regression)**2)/sum((yv-np.mean(yv))**2)
rmse_regression=np.mean((yv-ypred_regression)**2)**.5
print("Regression R2,RSME: ",round(r2_regression,3),round(rmse_regression,2))
#Regression Embedded
from sklearn.linear_model import LinearRegression
mod=mixed_model(
mod=LinearRegression(fit_intercept=False),
lr=1,
epoch=500,
optimization='Newtonian'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'cyl','yr',
'int']
)
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
for i in range(len(mod.linear_feats)):
plt.plot(np.array(mod.coefs_per_epoch)[:,i])
plt.title("Coefficient of "+mod.linear_feats[i])
plt.xlabel("Epoch")
plt.show()
plt.plot(mod.rmse_)
plt.title("Training RMSE")
plt.xlabel("Epoch")
plt.show()
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
mm_pr_with_regression_coefs = pd.DataFrame(mod.coef_means,columns=['Coef'])
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="A", year=2007)
ival_Deoq_to_AJAN = Period(freq="A-JAN", year=2008)
ival_Deoq_to_AJUN = Period(freq="A-JUN", year=2007)
ival_Deoq_to_ADEC = Period(freq="A-DEC", year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq="M", year=2007, month=1)
ival_D_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_D_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_D_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_D_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_D.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("A-JAN") == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq("A-JUN") == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq("A-DEC") == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq("M") == ival_D_to_M
assert ival_D_end_of_month.asfreq("M") == ival_D_to_M
assert ival_D.asfreq("W") == ival_D_to_W
assert ival_D_end_of_week.asfreq("W") == ival_D_to_W
assert ival_D_friday.asfreq("B") == ival_B_friday
assert ival_D_saturday.asfreq("B", "S") == ival_B_friday
assert ival_D_saturday.asfreq("B", "E") == ival_B_monday
assert ival_D_sunday.asfreq("B", "S") == ival_B_friday
assert ival_D_sunday.asfreq("B", "E") == ival_B_monday
assert ival_D.asfreq("H", "S") == ival_D_to_H_start
assert ival_D.asfreq("H", "E") == ival_D_to_H_end
assert ival_D.asfreq("Min", "S") == ival_D_to_T_start
assert ival_D.asfreq("Min", "E") == ival_D_to_T_end
assert ival_D.asfreq("S", "S") == ival_D_to_S_start
assert ival_D.asfreq("S", "E") == ival_D_to_S_end
assert ival_D.asfreq("D") == ival_D
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_H_end_of_quarter = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_H_end_of_month = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_H_end_of_week = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_H_end_of_day = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_end_of_bus = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_to_A = Period(freq="A", year=2007)
ival_H_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_H_to_M = Period(freq="M", year=2007, month=1)
ival_H_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_H_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_H_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_H_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_H_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_H_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_H_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
assert ival_H.asfreq("A") == ival_H_to_A
assert ival_H_end_of_year.asfreq("A") == ival_H_to_A
assert ival_H.asfreq("Q") == ival_H_to_Q
assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q
assert ival_H.asfreq("M") == ival_H_to_M
assert ival_H_end_of_month.asfreq("M") == ival_H_to_M
assert ival_H.asfreq("W") == ival_H_to_W
assert ival_H_end_of_week.asfreq("W") == ival_H_to_W
assert ival_H.asfreq("D") == ival_H_to_D
assert ival_H_end_of_day.asfreq("D") == ival_H_to_D
assert ival_H.asfreq("B") == ival_H_to_B
assert ival_H_end_of_bus.asfreq("B") == ival_H_to_B
assert ival_H.asfreq("Min", "S") == ival_H_to_T_start
assert ival_H.asfreq("Min", "E") == ival_H_to_T_end
assert ival_H.asfreq("S", "S") == ival_H_to_S_start
assert ival_H.asfreq("S", "E") == ival_H_to_S_end
assert ival_H.asfreq("H") == ival_H
def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
ival_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
ival_T_end_of_year = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_T_end_of_quarter = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_T_end_of_month = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_T_end_of_week = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_T_end_of_day = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_T_end_of_bus = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_T_end_of_hour = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_T_to_A = Period(freq="A", year=2007)
ival_T_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_T_to_M = Period(freq="M", year=2007, month=1)
ival_T_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_T_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_T_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_T_to_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_T_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=59
)
assert ival_T.asfreq("A") == ival_T_to_A
assert ival_T_end_of_year.asfreq("A") == ival_T_to_A
assert ival_T.asfreq("Q") == ival_T_to_Q
assert ival_T_end_of_quarter.asfreq("Q") == ival_T_to_Q
assert ival_T.asfreq("M") == ival_T_to_M
assert ival_T_end_of_month.asfreq("M") == ival_T_to_M
assert ival_T.asfreq("W") == ival_T_to_W
assert ival_T_end_of_week.asfreq("W") == ival_T_to_W
assert ival_T.asfreq("D") == ival_T_to_D
assert ival_T_end_of_day.asfreq("D") == ival_T_to_D
assert ival_T.asfreq("B") == ival_T_to_B
assert ival_T_end_of_bus.asfreq("B") == ival_T_to_B
assert ival_T.asfreq("H") == ival_T_to_H
assert ival_T_end_of_hour.asfreq("H") == ival_T_to_H
assert ival_T.asfreq("S", "S") == ival_T_to_S_start
assert ival_T.asfreq("S", "E") == ival_T_to_S_end
assert ival_T.asfreq("Min") == ival_T
def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
ival_S = Period(freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0)
ival_S_end_of_year = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_quarter = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_month = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_week = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
ival_S_end_of_day = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
ival_S_end_of_bus = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
ival_S_end_of_hour = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
ival_S_end_of_minute = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=59
)
ival_S_to_A = Period(freq="A", year=2007)
ival_S_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_S_to_M = | Period(freq="M", year=2007, month=1) | pandas.Period |
"""
Compare COVID-19 simulation outputs to data.
Estimate Rt using epyestim
"""
import argparse
import os
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
import matplotlib.dates as mdates
import epyestim
import epyestim.covid19 as covid19
import seaborn as sns
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-stem",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
return parser.parse_args()
def get_distributions(show_plot=False):
si_distrb = covid19.generate_standard_si_distribution()
delay_distrb = covid19.generate_standard_infection_to_reporting_distribution()
if show_plot:
fig, axs = plt.subplots(1, 2, figsize=(12, 3))
axs[0].bar(range(len(si_distrb)), si_distrb, width=1)
axs[1].bar(range(len(delay_distrb)), delay_distrb, width=1)
axs[0].set_title('Default serial interval distribution')
axs[1].set_title('Default infection-to-reporting delay distribution')
return si_distrb, delay_distrb
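# Illustrative sketch, not part of the original analysis: the two arrays returned by
# get_distributions() are discrete probability distributions (serial interval and
# infection-to-reporting delay), so each should sum to approximately 1. They are assumed
# to be the inputs epyestim uses when estimating Rt (e.g. via covid19.r_covid).
def _inspect_distributions():
    si_distrb, delay_distrb = get_distributions(show_plot=False)
    return {
        "si_sum": float(sum(si_distrb)),        # expected to be ~1.0
        "delay_sum": float(sum(delay_distrb)),  # expected to be ~1.0
        "si_days": len(si_distrb),
        "delay_days": len(delay_distrb),
    }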
def rt_plot(df, plotname,first_day=None, last_day=None):
fig = plt.figure(figsize=(16, 8))
fig.suptitle(x=0.5, y=0.989, t='Estimated time-varying reproductive number (Rt)')
fig.subplots_adjust(right=0.97, left=0.05, hspace=0.4, wspace=0.2, top=0.93, bottom=0.05)
palette = sns.color_palette('husl', 8)
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
"""Combine demand, hydro, wind, and solar traces into a single DataFrame"""
import os
import time
import pandas as pd
import matplotlib.pyplot as plt
def _pad_column(col, direction):
"""Pad values forwards or backwards to a specified date"""
# Drop missing values
df = col.dropna()
# Convert to DataFrame
df = df.to_frame()
# Options that must change depending on direction in which to pad
if direction == 'forward':
keep = 'last'
new_index = pd.date_range(start=df.index[0], end='2051-01-01 00:00:00', freq='1H')
elif direction == 'backward':
keep = 'first'
new_index = pd.date_range(start='2016-01-01 01:00:00', end=df.index[-1], freq='1H')
else:
raise Exception(f'Unexpected direction: {direction}')
# Update index
df = df.reindex(new_index)
def _get_hour_of_year(row):
"""Get hour of year"""
# Get day of year - adjust by 1 minute so last timestamp (2051-01-01 00:00:00)
# is assigned to 2050. Note this timestamp actually corresponds to the interval
# 2050-12-31 23:00:00 to 2051-01-01 00:00:00
day_timestamp = row.name - | pd.Timedelta(minutes=1) | pandas.Timedelta |
# coding: utf-8
# # Data conversion for training the model
# In[1]:
import os
import shutil
import pandas as pd
import PIL.Image
from tqdm import tqdm
import concurrent.futures as futures
# In[3]:
DATA_PATH = "/mnt/stg/inclusive-images-challenge/"
RAW_PATH = f'{DATA_PATH}raw/'
TGT_PATH = f'{DATA_PATH}train/'
# In[4]:
os.makedirs(TGT_PATH, exist_ok=True)
# In[5]:
print("Loading labels data frame...")
df_label_names = pd.read_csv(f'{DATA_PATH}class-descriptions.csv')
df_trainable_labels = pd.read_csv(f'{DATA_PATH}classes-trainable.csv')
print("Loading bounding box data...")
df_bboxes = | pd.read_csv(f'{DATA_PATH}train_bounding_boxes.csv') | pandas.read_csv |
from seesaw.dataset_manager import GlobalDataManager
from ray.data.extensions import TensorArray
import torchvision
from torchvision import transforms as T
import numpy as np
import pandas as pd
from .dataset_tools import *
from torch.utils.data import DataLoader
import math
from tqdm.auto import tqdm
import torch
from .query_interface import *
from .embeddings import make_clip_transform, ImTransform, XEmbedding
from .dataset_search_terms import *
import pyroaring as pr
from operator import itemgetter
import PIL
from .dataset_manager import VectorIndex
import math
import annoy
import os
from typing import List
def _postprocess_results(acc):
flat_acc = {
"iis": [],
"jjs": [],
"dbidx": [],
"vecs": [],
"zoom_factor": [],
"zoom_level": [],
}
flat_vecs = []
# {'accs':accs, 'sf':sf, 'dbidx':dbidx, 'zoom_level':zoom_level}
for item in acc:
acc0, sf, dbidx, zl = itemgetter("accs", "sf", "dbidx", "zoom_level")(item)
acc0 = acc0.squeeze(0)
acc0 = acc0.transpose((1, 2, 0))
iis, jjs = np.meshgrid(
range(acc0.shape[0]), range(acc0.shape[1]), indexing="ij"
)
# iis = iis.reshape(-1, acc0)
iis = iis.reshape(-1)
jjs = jjs.reshape(-1)
acc0 = acc0.reshape(-1, acc0.shape[-1])
imids = np.ones_like(iis) * dbidx
zf = np.ones_like(iis) * (1.0 / sf)
zl = np.ones_like(iis) * zl
flat_acc["iis"].append(iis)
flat_acc["jjs"].append(jjs)
flat_acc["dbidx"].append(imids)
flat_acc["vecs"].append(acc0)
flat_acc["zoom_factor"].append(zf)
flat_acc["zoom_level"].append(zl)
flat = {}
for k, v in flat_acc.items():
flat[k] = np.concatenate(v)
vecs = flat["vecs"]
del flat["vecs"]
vec_meta = pd.DataFrame(flat)
vecs = vecs.astype("float32")
vecs = vecs / (np.linalg.norm(vecs, axis=-1, keepdims=True) + 1e-6)
vec_meta = vec_meta.assign(file_path=item["file_path"])
vec_meta = vec_meta.assign(vectors=TensorArray(vecs))
return vec_meta
def preprocess_ds(localxclip, ds, debug=False):
txds = TxDataset(ds, tx=pyramid_tx(non_resized_transform(224)))
acc = []
if debug:
num_workers = 0
else:
num_workers = 4
for dbidx, tup in enumerate(
tqdm(
DataLoader(
txds,
num_workers=num_workers,
shuffle=False,
batch_size=1,
collate_fn=lambda x: x,
),
total=len(txds),
)
):
[(ims, sfs)] = tup
for zoom_level, (im, sf) in enumerate(zip(ims, sfs), start=1):
accs = localxclip.from_image(preprocessed_image=im, pooled=False)
acc.append((accs, sf, dbidx, zoom_level))
return _postprocess_results(acc)
def pyramid_centered(im, i, j):
cy = (i + 1) * 112.0
cx = (j + 1) * 112.0
scales = [112, 224, 448]
crs = []
w, h = im.size
for s in scales:
tup = (
np.clip(cx - s, 0, w),
np.clip(cy - s, 0, h),
np.clip(cx + s, 0, w),
np.clip(cy + s, 0, h),
)
crs.append(im.crop(tup))
return crs
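# Illustrative sketch with a made-up image size and grid position: pyramid_centered()
# returns three crops centred on cell (i, j) of a 112-pixel grid, clipped to the image
# borders, using half-widths of 112, 224 and 448 pixels.
def _demo_pyramid_centered():
    im = PIL.Image.new("RGB", (896, 672))
    crops = pyramid_centered(im, i=1, j=2)
    return [c.size for c in crops]  # [(224, 224), (448, 448), (784, 672)]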
def zoom_out(im: PIL.Image, factor=0.5, abs_min=224):
"""
    Return the image one zoom level out, together with the scale factor that was used.
"""
w, h = im.size
mindim = min(w, h)
target_size = max(math.floor(mindim * factor), abs_min)
if (
target_size * math.sqrt(factor) <= abs_min
): # if the target size is almost as large as the image,
# jump to that scale instead
target_size = abs_min
target_factor = target_size / mindim
target_w = max(
math.floor(w * target_factor), 224
) # corrects any rounding effects that make the size 223
target_h = max(math.floor(h * target_factor), 224)
im1 = im.resize((target_w, target_h))
assert min(im1.size) >= abs_min
return im1, target_factor
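# Illustrative sketch with a made-up image size: zooming out of a 1000x800 image with the
# default factor=0.5 yields a 500x400 image and scale factor 0.5, since 400 is still well
# above abs_min=224 and no jump to the minimum size is triggered.
def _demo_zoom_out():
    im = PIL.Image.new("RGB", (1000, 800))
    smaller, factor = zoom_out(im)
    return smaller.size, factor  # ((500, 400), 0.5)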
def rescale(im, scale, min_size):
(w, h) = im.size
target_w = max(math.floor(w * scale), min_size)
target_h = max(math.floor(h * scale), min_size)
return im.resize(size=(target_w, target_h), resample=PIL.Image.BILINEAR)
def pyramid(im, factor=0.71, abs_min=224):
    ## if the image is smaller than abs_min, expand it so it fits the minimum size
    ## the original size and abs_min together bound the range of scales used below
assert factor < 1.0
factor = 1.0 / factor
size = min(im.size)
end_size = abs_min
start_size = max(size, abs_min)
start_scale = start_size / size
end_scale = end_size / size
## adjust start scale
ntimes = math.ceil(math.log(start_scale / end_scale) / math.log(factor))
start_size = math.ceil(math.exp(ntimes * math.log(factor) + math.log(abs_min)))
start_scale = start_size / size
factors = np.geomspace(
start=start_scale, stop=end_scale, num=ntimes + 1, endpoint=True
).tolist()
ims = []
for sf in factors:
imout = rescale(im, scale=sf, min_size=abs_min)
ims.append(imout)
assert len(ims) > 0
assert min(ims[0].size) >= abs_min
assert min(ims[-1].size) == abs_min
return ims, factors
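# Illustrative sketch with a made-up image size: pyramid() returns progressively smaller
# copies of the image (possibly upscaling first), ending at a copy whose smaller side
# equals abs_min, together with the scale factor applied at each level.
def _demo_pyramid():
    im = PIL.Image.new("RGB", (672, 448))
    ims, factors = pyramid(im)
    assert min(ims[-1].size) == 224
    return [x.size for x in ims], factors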
def trim_edge(target_divisor=112):
def fun(im1):
w1, h1 = im1.size
spare_h = h1 % target_divisor
spare_w = w1 % target_divisor
im1 = im1.crop((0, 0, w1 - spare_w, h1 - spare_h))
return im1
return fun
class TrimEdge:
def __init__(self, target_divisor=112):
self.target_divisor = target_divisor
def __call__(self, im1):
w1, h1 = im1.size
spare_h = h1 % self.target_divisor
spare_w = w1 % self.target_divisor
im1 = im1.crop((0, 0, w1 - spare_w, h1 - spare_h))
return im1
def torgb(image):
return image.convert("RGB")
def tofloat16(x):
return x.type(torch.float16)
def non_resized_transform(base_size):
return ImTransform(
visual_xforms=[torgb],
tensor_xforms=[
T.ToTensor(),
T.Normalize(
(0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711),
),
# tofloat16
],
)
class PyramidTx:
def __init__(self, tx, factor, min_size):
self.tx = tx
self.factor = factor
self.min_size = min_size
def __call__(self, im):
ims, sfs = pyramid(im, factor=self.factor, abs_min=self.min_size)
ppims = []
for im in ims:
ppims.append(self.tx(im))
return ppims, sfs
def pyramid_tx(tx):
def fn(im):
ims, sfs = pyramid(im)
ppims = []
for im in ims:
ppims.append(tx(im))
return ppims, sfs
return fn
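# Illustrative sketch with a made-up image size: this is the same composition that
# preprocess_ds uses above (non_resized_transform wrapped in pyramid_tx); it yields one
# normalized CHW tensor per pyramid level plus the matching scale factors.
def _demo_pyramid_tx():
    im = PIL.Image.new("RGB", (640, 480))
    tx = pyramid_tx(non_resized_transform(224))
    tensors, factors = tx(im)
    return [tuple(t.shape) for t in tensors], factors  # shapes like (3, H, W)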
def augment_score(db, tup, qvec):
im = db.raw[tup.dbidx]
    ims = pyramid_centered(im, tup.iis, tup.jjs)
tx = make_clip_transform(n_px=224, square_crop=True)
vecs = []
for im in ims:
pim = tx(im)
emb = db.embedding.from_image(preprocessed_image=pim.float())
emb = emb / np.linalg.norm(emb, axis=-1)
vecs.append(emb)
vecs = np.concatenate(vecs)
# print(np.linalg.norm(vecs,axis=-1))
augscore = (vecs @ qvec.reshape(-1)).mean()
return augscore
import torchvision.ops
# torchvision.ops.box_iou()
def box_iou(tup, boxes):
b1 = torch.from_numpy(
np.stack([tup.x1.values, tup.y1.values, tup.x2.values, tup.y2.values], axis=1)
)
bxdata = np.stack(
[boxes.x1.values, boxes.y1.values, boxes.x2.values, boxes.y2.values], axis=1
)
b2 = torch.from_numpy(bxdata)
ious = torchvision.ops.box_iou(b1, b2)
return ious.numpy()
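# Illustrative sketch with made-up coordinates: box_iou expects two DataFrames that carry
# x1/y1/x2/y2 columns and returns the pairwise IoU matrix as a numpy array.
def _demo_box_iou():
    a = pd.DataFrame({"x1": [0.0], "y1": [0.0], "x2": [100.0], "y2": [100.0]})
    b = pd.DataFrame(
        {"x1": [0.0, 50.0], "y1": [0.0, 50.0], "x2": [100.0, 150.0], "y2": [100.0, 150.0]}
    )
    return box_iou(a, b)  # shape (1, 2): exact overlap gives 1.0, partial overlap ~0.143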
def augment_score2(tup, vec_meta, vecs, *, agg_method, rescore_method):
assert callable(rescore_method)
vec_meta = vec_meta.reset_index(drop=True)
ious = box_iou(tup, vec_meta)
vec_meta = vec_meta.assign(iou=ious.reshape(-1))
max_boxes = vec_meta.groupby("zoom_level").iou.idxmax()
max_boxes = max_boxes.sort_index(
ascending=True
) # largest zoom level (zoomed out) goes last
relevant_meta = vec_meta.iloc[max_boxes]
relevant_iou = (
relevant_meta.iou > 0
) # there should be at least some overlap for it to be relevant
max_boxes = max_boxes[relevant_iou.values]
rel_vecs = vecs[max_boxes]
if agg_method == "avg_score":
sc = rescore_method(rel_vecs)
ws = np.ones_like(sc)
fsc = ws.reshape(-1) @ sc.reshape(-1)
fsc = fsc / ws.sum()
return fsc
elif agg_method == "avg_vector":
merged_vec = rel_vecs.mean(axis=0, keepdims=True)
merged_vec = merged_vec / np.linalg.norm(merged_vec)
return rescore_method(merged_vec)
else:
assert False, f"unknown agg_method {agg_method}"
def get_boxes(vec_meta):
if "x1" in vec_meta.columns:
return vec_meta[["x1", "x2", "y1", "y2"]]
y1 = vec_meta.iis * 112
y2 = y1 + 224
x1 = vec_meta.jjs * 112
x2 = x1 + 224
factor = vec_meta.zoom_factor
boxes = vec_meta.assign(
**{"x1": x1 * factor, "x2": x2 * factor, "y1": y1 * factor, "y2": y2 * factor}
)[["x1", "y1", "x2", "y2"]]
boxes = boxes.astype(
"float32"
) ## multiplication makes this type double but this is too much.
return boxes
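# Illustrative sketch with made-up metadata rows: each (iis, jjs) cell maps to a 224x224
# patch on a 112-pixel stride, scaled back to original-image coordinates by zoom_factor.
def _demo_get_boxes():
    meta = pd.DataFrame({"iis": [0, 1], "jjs": [0, 2], "zoom_factor": [1.0, 2.0]})
    return get_boxes(meta)  # rows (x1, y1, x2, y2): (0, 0, 224, 224) and (448, 224, 896, 672)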
def get_pos_negs_all_v2(dbidxs, label_db: LabelDB, vec_meta: pd.DataFrame):
idxs = pr.BitMap(dbidxs)
relvecs = vec_meta[vec_meta.dbidx.isin(idxs)]
pos = []
neg = []
for idx in dbidxs:
acc_vecs = relvecs[relvecs.dbidx == idx]
acc_boxes = get_boxes(acc_vecs)
label_boxes = label_db.get(idx, format="df")
ious = box_iou(label_boxes, acc_boxes)
total_iou = ious.sum(axis=0)
negatives = total_iou == 0
negvec_positions = acc_vecs.index[negatives].values
# get the highest iou positives for each
max_ious_id = np.argmax(ious, axis=1)
max_ious = np.max(ious, axis=1)
pos_idxs = pr.BitMap(max_ious_id[max_ious > 0])
# if label_boxes.shape[0] > 0: # some boxes are size 0 bc. of some bug in the data, so don't assert here.
# assert len(pos_idxs) > 0
posvec_positions = acc_vecs.index[pos_idxs].values
pos.append(posvec_positions)
neg.append(negvec_positions)
posidxs = pr.BitMap(np.concatenate(pos))
negidxs = pr.BitMap(np.concatenate(neg))
return posidxs, negidxs
def build_index(vecs, file_name):
t = annoy.AnnoyIndex(512, "dot")
for i in range(len(vecs)):
t.add_item(i, vecs[i])
t.build(n_trees=100) # tested 100 on bdd, works well, could do more.
t.save(file_name)
u = annoy.AnnoyIndex(512, "dot")
u.load(file_name) # verify can load.
return u
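# Illustrative sketch with random vectors and a made-up path: build_index stores 512-d
# vectors in an Annoy index under dot-product similarity and returns a reloaded copy,
# which can then answer nearest-neighbour queries.
def _demo_build_index(tmp_file="/tmp/demo_vectors.ann"):
    rng = np.random.default_rng(0)
    vecs = rng.normal(size=(100, 512)).astype("float32")
    vecs = vecs / np.linalg.norm(vecs, axis=-1, keepdims=True)
    index = build_index(vecs, tmp_file)
    ids, scores = index.get_nns_by_vector(vecs[0], 5, include_distances=True)
    return ids, scores  # ids[0] should be 0, the query vector itself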
def filter_mask(meta, min_level_inclusive):
gpmax = meta.groupby("dbidx").zoom_level.max().rename("zoom_level_max")
aug_meta = | pd.merge(meta, gpmax, left_on="dbidx", right_index=True) | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 11:12:51 2020
@author: sahand
This is a preprocessing script for the Cora dataset [McCallumIRJ]
@article{McCallumIRJ,
author = "<NAME> and <NAME> and <NAME> and <NAME>",
title = "Automating the Construction of Internet Portals with Machine Learning",
journal = "Information Retrieval Journal",
volume = 3,
pages = "127--163",
publisher = "Kluwer",
year = 2000,
note = "www.research.whizbang.com/data"
}
"""
# =============================================================================
# Init
# =============================================================================
dir_path = '/home/sahand/GoogleDrive/Data/Corpus/cora-classify/cora/clean/single_component_small/' # ryzen
# dir_path = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/cora-classify/cora/' # c1314
import json
from os import listdir
from os.path import isfile, join
import pandas as pd
import numpy as np
import networkx as nx
from tqdm import tqdm
import gc
tqdm.pandas()
from sciosci.assets import text_assets
# =============================================================================
# read JSON and lists from Cora data
# =============================================================================
papers_list_raw = pd.read_csv(dir_path+'papers',sep='\t',names=['id','filename','citation string']) # contains duplicates
# papers_list_raw = papers_list_raw.groupby('id').first().reset_index()
papers_list_labeled = pd.read_csv(dir_path+'classifications',sep='\t',names=['filename','class'])
papers_list_labeled = papers_list_labeled[pd.notna(papers_list_labeled['class'])]
citations = pd.read_csv(dir_path+'citations',names=['referring_id','cited_id'],sep='\t')
# =============================================================================
# Prepare classes
# =============================================================================
def cleanup(arr):
try:
return np.array([x for x in arr if x!=''])
except:
print('\nGot',arr,', which is not a list. returning as-is.')
return np.array(arr)
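# Quick illustration with a made-up class path: Cora classes look like
# '/Artificial_Intelligence/Machine_Learning/Case-Based/', so splitting on '/' leaves
# empty strings that cleanup() removes before the levels are unpacked into columns.
_example_class = cleanup(['', 'Artificial_Intelligence', 'Machine_Learning', 'Case-Based', ''])
# -> array(['Artificial_Intelligence', 'Machine_Learning', 'Case-Based'])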
labels = pd.DataFrame(list(papers_list_labeled['class'].str.split('/').progress_apply(lambda x: cleanup(x))))
labels.columns = ['class1','class2','class3']
papers_list_labeled = pd.concat([papers_list_labeled,labels],axis=1)
# Inspect classes
label_names = [str(x) for x in list(labels.groupby('class1').groups.keys())]
# =============================================================================
# Read text files
# =============================================================================
mypath = dir_path+'extractions'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
columns = ['filename','URL','Refering-URL','Root-URL','Email','Title','Author','Address','Affiliation','Note','Abstract','References-found']
papers_df = pd.DataFrame([],columns=columns)
log = []
for f_name in tqdm(files):
# f_name = 'http:##www.win.tue.nl#win#cs#fm#Dennis.Dams#Papers#dgg95a.ps.gz'
f = open(join(mypath,f_name), "r")
paper = [['filename',f_name]]
try:
tmp = f.read().split('\n')
except:
print('Failed to read file ',f_name,'\nLook at the final log for the list of such files.')
log.append(['reading failed',f_name])
continue
for line in tmp:
if line!='':
ar = line.split(': ', 1)
if len(ar)>1:
paper.append(ar)
paper_np = np.array(paper)
paper = pd.DataFrame(paper_np.T[1])
paper.index = paper_np.T[0]
paper = paper.T
paper = paper[paper.columns[paper.columns.isin(columns)]]
# papers_df = papers_df.append(paper)[papers_df.columns]
try:
papers_df = pd.concat([papers_df,paper])
except:
print('Something went wrong when concatenating the file',f_name,'\nLook at the final log for the list of such files.')
log.append(['concatenating failed',f_name])
papers_df.to_csv(dir_path+'extractions.csv',index=False)
log=pd.DataFrame(log,columns=['error','file'])
log.to_csv(dir_path+'extractions_log')
# =============================================================================
# Merge based on file name to get the idx
# =============================================================================
merged = pd.merge(papers_df, papers_list_raw, on='filename')
merged.to_csv(dir_path+'extractions_with_id.csv',index=False)
sample = merged.sample(5)
# =============================================================================
# Further pre-process to get unique abstracts
# =============================================================================
data = merged.copy()
data = pd.read_csv(dir_path+'extractions_with_id.csv')
# =============================================================================
# Merge based on file name to get the idx
# =============================================================================
merged = pd.merge(papers_list_labeled, data, on='filename')
merged.to_csv(dir_path+'extractions_with_unique_id_labeled.csv',index=False)
sample = merged.sample(5)
# =============================================================================
#
# =============================================================================
data = merged.copy()
data = pd.read_csv(dir_path+'extractions_with_id.csv')
data_clean = data[pd.notna(data['Abstract'])]
data_clean = data_clean[data_clean['Abstract']!='']
data_clean = data_clean[data_clean['Abstract']!=' ']
data_clean = data_clean[pd.notnull(data_clean['Abstract'])]
data_clean = data_clean[pd.notna(data_clean['Title'])]
data_clean = data_clean[pd.notna(data_clean['id'])]
data_clean_unique = data_clean.groupby('id').first().reset_index()
data_clean_unique.to_csv(dir_path+'extractions_with_unique_id.csv',index=False)
sample = data_clean_unique.sample(500)
# =============================================================================
# Filter citations based on the papers
# =============================================================================
data = | pd.read_csv(dir_path+'corpus_idx_original') | pandas.read_csv |
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from collections import Counter
import seaborn as sns
from itertools import combinations
from matplotlib import gridspec
from IPython.display import display
from typing import Union, List, Tuple
from .plot_utils import *
from .data_utils import trim_values, compute_time_deltas, convert_to_freq_string
from .univariate_plots import (
histogram,
boxplot,
countplot,
time_series_countplot,
plot_ngrams,
)
from .bivariate_plots import time_series_plot
from .config import TIME_UNITS
import calendar
import tldextract
FLIP_LEVEL_MINIMUM = 5
def compute_univariate_summary_table(
data: pd.DataFrame, column: str, data_type: str, lower_trim=0, upper_trim=0
) -> pd.DataFrame:
"""
    Computes summary statistics for a pandas DataFrame column.
Computed statistics include some subset of the following depending on data type:
- mean and median
- min and max
- 25% percentile
- 75% percentile
- standard deviation and interquartile range
- count and percentage of missing values
Args:
data: The dataframe with the column to summarize
column: The column in the dataframe to summarize
data_type: Type of column to use to determine summary values to return
lower_trim: Number of values to trim from lower end of distribution
upper_trim: Number of values to trim from upper end of distribution
Returns:
pandas DataFrame with one row containing the summary statistics for the provided column
"""
data = trim_values(data, column, lower_trim, upper_trim)
# Get summary table
count_missing = data[column].isnull().sum()
perc_missing = 100 * count_missing / data.shape[0]
count_obs = data.shape[0] - count_missing
count_levels = data[column].nunique()
counts_table = pd.DataFrame(
{
"count_observed": [count_obs],
"count_unique": [count_levels],
"count_missing": [count_missing],
"percent_missing": [perc_missing],
},
index=[column],
)
if data_type == "datetime":
counts_table["min"] = data[column].min()
counts_table["25%"] = data[column].quantile(0.25)
counts_table["median"] = data[column].median()
counts_table["75%"] = data[column].quantile(0.75)
counts_table["max"] = data[column].max()
counts_table["iqr"] = data[column].quantile(0.75) - data[column].quantile(0.25)
return counts_table
elif data_type in ["numeric", "datetime"]:
stats_table = pd.DataFrame(data[column].describe()).T
stats_table["iqr"] = data[column].quantile(0.75) - data[column].quantile(0.25)
stats_table = stats_table[
["min", "25%", "50%", "mean", "75%", "max", "std", "iqr"]
]
stats_table = stats_table.rename({"50%": "median"}, axis="columns")
return pd.concat([counts_table, stats_table], axis=1)
else:
return counts_table
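# Illustrative sketch with made-up data: for a numeric column the returned table combines
# the observed/missing counts with describe()-style statistics plus the IQR, indexed by
# the column name.
def _demo_numeric_summary_table():
    df = pd.DataFrame({"total": [1.0, 2.0, 3.0, None, 10.0]})
    return compute_univariate_summary_table(df, "total", "numeric")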
def categorical_univariate_summary(
data: pd.DataFrame,
column: str,
fig_height: int = 5,
fig_width: int = 10,
fontsize: int = 15,
color_palette: str = None,
order: Union[str, List] = "auto",
max_levels: int = 30,
label_rotation: Optional[int] = None,
label_fontsize: Optional[float] = None,
flip_axis: Optional[bool] = None,
percent_axis: bool = True,
label_counts: bool = True,
include_missing: bool = False,
interactive: bool = False,
) -> Tuple[pd.DataFrame, plt.Figure]:
"""
Creates a univariate EDA summary for a provided categorical data column in a pandas DataFrame.
Summary consists of a count plot with twin axes for counts and percentages for each level of the
variable and a small summary table.
Args:
data: pandas DataFrame with data to be plotted
column: column in the dataframe to plot
fig_width: figure width in inches
fig_height: figure height in inches
fontsize: Font size of axis and tick labels
color_palette: Seaborn color palette to use
order: Order in which to sort the levels of the variable for plotting:
- **'auto'**: sorts ordinal variables by provided ordering, nominal variables by descending frequency, and numeric variables in sorted order.
- **'descending'**: sorts in descending frequency.
- **'ascending'**: sorts in ascending frequency.
- **'sorted'**: sorts according to sorted order of the levels themselves.
- **'random'**: produces a random order. Useful if there are too many levels for one plot.
Or you can pass a list of level names in directly for your own custom order.
max_levels: Maximum number of levels to attempt to plot on a single plot. If exceeded, only the
max_level - 1 levels will be plotted and the remainder will be grouped into an 'Other' category.
percent_axis: Whether to add a twin y axis with percentages
label_counts: Whether to add exact counts and percentages as text annotations on each bar in the plot.
label_fontsize: Size of the annotations text. Default tries to infer a reasonable size based on the figure
size and number of levels.
flip_axis: Whether to flip the plot so labels are on y axis. Useful for long level names or lots of levels.
Default tries to infer based on number of levels and label_rotation value.
label_rotation: Amount to rotate level labels. Useful for long level names or lots of levels.
include_missing: Whether to include missing values as an additional level in the data
interactive: Whether to display plot and table for interactive use in a jupyter notebook
Returns:
Summary table and matplotlib figure with countplot
Example:
.. plot::
import seaborn as sns
import intedact
data = sns.load_dataset('tips')
intedact.categorical_univariate_summary(data, 'day', interactive=True)
"""
data = data.copy()
if flip_axis is None:
flip_axis = data[column].nunique() > FLIP_LEVEL_MINIMUM
if color_palette != "":
sns.set_palette(color_palette)
else:
sns.set_palette("tab10")
# Get summary table
summary_table = compute_univariate_summary_table(data, column, "categorical")
# Plot countplot
fig, axs = plt.subplots(1, 1, figsize=(fig_width, fig_height))
ax = countplot(
data,
column,
order=order,
max_levels=max_levels,
percent_axis=percent_axis,
label_counts=label_counts,
flip_axis=flip_axis,
label_fontsize=label_fontsize,
include_missing=include_missing,
label_rotation=label_rotation,
fontsize=fontsize,
)
if interactive:
display(summary_table)
plt.show()
return summary_table, fig
def numeric_univariate_summary(
data: pd.DataFrame,
column: str,
fig_height: int = 4,
fig_width: int = 8,
fontsize: int = 15,
color_palette: str = None,
bins: Optional[int] = None,
transform: str = "identity",
clip: float = 0,
kde: bool = False,
lower_trim: int = 0,
upper_trim: int = 0,
interactive: bool = False,
) -> Tuple[pd.DataFrame, plt.Figure]:
"""
Creates a univariate EDA summary for a provided high cardinality numeric data column in a pandas DataFrame.
Summary consists of a histogram, boxplot, and small table of summary statistics.
Args:
data: pandas DataFrame to perform EDA on
column: A string matching a column in the data to visualize
fig_height: Height of the plot in inches
fig_width: Width of the plot in inches
fontsize: Font size of axis and tick labels
color_palette: Seaborn color palette to use
        bins: Number of bins to use for the histogram. Default determines the number of bins from the data
transform: Transformation to apply to the data for plotting:
- 'identity': no transformation
- 'log': apply a logarithmic transformation with small constant added in case of zero values
- 'log_exclude0': apply a logarithmic transformation with zero values removed
- 'sqrt': apply a square root transformation
kde: Whether to overlay a KDE plot on the histogram
lower_trim: Number of values to trim from lower end of distribution
upper_trim: Number of values to trim from upper end of distribution
        interactive: Whether to display figures and tables in jupyter notebook for interactive use
Returns:
Tuple containing matplotlib Figure drawn and summary stats DataFrame
Example:
.. plot::
import seaborn as sns
import intedact
data = sns.load_dataset('tips')
intedact.numeric_univariate_summary(data, 'total_bill', interactive=True)[0]
"""
data = data.copy()
if color_palette != "":
sns.set_palette(color_palette)
else:
sns.set_palette("tab10")
# Get summary table
table = compute_univariate_summary_table(
data, column, "numeric", lower_trim, upper_trim
)
f, axs = plt.subplots(2, 1, figsize=(fig_width, fig_height * 2))
histogram(
data,
column,
ax=axs[0],
bins=bins,
transform=transform,
clip=clip,
lower_trim=lower_trim,
upper_trim=upper_trim,
kde=kde,
)
axs[0].set_xlabel("")
boxplot(
data,
column,
ax=axs[1],
transform=transform,
lower_trim=lower_trim,
upper_trim=upper_trim,
)
set_fontsize(axs[0], fontsize)
set_fontsize(axs[1], fontsize)
if interactive:
display(table)
plt.show()
return table, f
def datetime_univariate_summary(
data: pd.DataFrame,
column: str,
fig_height: int = 4,
fig_width: int = 8,
fontsize: int = 15,
color_palette: str = None,
ts_freq: str = "auto",
delta_units: str = "auto",
ts_type: str = "line",
trend_line: str = "auto",
date_labels: Optional[str] = None,
date_breaks: Optional[str] = None,
lower_trim: int = 0,
upper_trim: int = 0,
interactive: bool = False,
) -> Tuple[pd.DataFrame, plt.Figure]:
"""
Creates a univariate EDA summary for a provided datetime data column in a pandas DataFrame.
Produces the following summary plots:
- a time series plot of counts aggregated at the temporal resolution provided by ts_freq
- a time series plot of time deltas between successive observations in units defined by delta_freq
- countplots for the following metadata from the datetime object:
- day of week
- day of month
- month
- year
- hour
- minute
Args:
data: pandas DataFrame to perform EDA on
column: A string matching a column in the data
fig_height: Height of the plot in inches
fig_width: Width of the plot in inches
fontsize: Font size of axis and tick labels
color_palette: Seaborn color palette to use
ts_freq: String describing the frequency at which to aggregate data in one of two formats:
- A `pandas offset string <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`_.
- A human readable string in the same format passed to date breaks (e.g. "4 months")
Default is to attempt to intelligently determine a good aggregation frequency.
delta_units: String describing the units in which to compute time deltas between successive observations in one of two formats:
- A `pandas offset string <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`_.
- A human readable string in the same format passed to date breaks (e.g. "4 months")
Default is to attempt to intelligently determine a good frequency unit.
ts_type: 'line' plots a line graph while 'point' plots points for observations
trend_line: Trend line to plot over data. "None" produces no trend line. Other options are passed
to `geom_smooth <https://plotnine.readthedocs.io/en/stable/generated/plotnine.geoms.geom_smooth.html>`_.
date_labels: strftime date formatting string that will be used to set the format of the x axis tick labels
date_breaks: Date breaks string in form '{interval} {period}'. Interval must be an integer and period must be
a time period ranging from seconds to years. (e.g. '1 year', '3 minutes')
lower_trim: Number of values to trim from lower end of distribution
upper_trim: Number of values to trim from upper end of distribution
interactive: Whether to display figures and tables in jupyter notebook for interactive use
Returns:
Tuple containing matplotlib Figure drawn and summary stats DataFrame
Examples:
.. plot::
import pandas as pd
import intedact
data = pd.read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/tidytuesday_tweets/data.csv")
data['created_at'] = pd.to_datetime(data.created_at)
            intedact.datetime_univariate_summary(data, 'created_at', ts_freq='1 week', delta_units='1 hour')
"""
data = data.copy()
data = trim_values(data, column, lower_trim, upper_trim)
if trend_line == "none":
trend_line = None
if date_breaks == "auto":
date_breaks = None
if date_labels == "auto":
date_labels = None
if color_palette != "":
sns.set_palette(color_palette)
else:
sns.set_palette("tab10")
# Compute extra columns with datetime attributes
data["Month"] = data[column].dt.month_name()
data["Day of Month"] = data[column].dt.day
data["Year"] = data[column].dt.year
data["Hour"] = data[column].dt.hour
data["Day of Week"] = data[column].dt.day_name()
# Compute time deltas
data["deltas"], delta_units = compute_time_deltas(data[column], delta_units)
# Compute summary table
table = compute_univariate_summary_table(data, column, "datetime")
delta_table = compute_univariate_summary_table(
data.iloc[1:, :], "deltas", "numeric"
)
delta_table.index = [f"Time Deltas ({delta_units})"]
table = pd.concat([table, delta_table], axis=0)
if interactive:
display(table)
fig = plt.figure(figsize=(fig_width, fig_height * 4))
spec = gridspec.GridSpec(ncols=2, nrows=5, figure=fig)
# time series count plot
ax = fig.add_subplot(spec[0, :])
ax = time_series_countplot(
data,
column,
ax,
ts_freq=ts_freq,
ts_type=ts_type,
trend_line=trend_line,
date_breaks=date_breaks,
date_labels=date_labels,
)
set_fontsize(ax, fontsize)
# Summary plots of time deltas
ax = fig.add_subplot(spec[1, 0])
ax = histogram(data, "deltas", ax=ax)
ax.set_xlabel(f"{delta_units.title()} between observations")
set_fontsize(ax, fontsize)
ax = fig.add_subplot(spec[1, 1])
ax = boxplot(data, "deltas", ax=ax)
ax.set_xlabel(f"{delta_units.title()} between observations")
set_fontsize(ax, fontsize)
# countplot by month
data["Month"] = pd.Categorical(
data["Month"], categories=list(calendar.month_name)[1:], ordered=True
)
ax = fig.add_subplot(spec[2, 0])
ax = countplot(
data,
"Month",
ax,
label_fontsize=10,
flip_axis=True,
fontsize=fontsize,
)
# countplot by day of month
data["Day of Month"] = pd.Categorical(
data["Day of Month"], categories=np.arange(1, 32, 1), ordered=True
)
ax = fig.add_subplot(spec[2, 1])
ax = countplot(
data,
"Day of Month",
ax,
label_counts=False,
flip_axis=True,
max_levels=35,
fontsize=fontsize,
)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=9)
# countplot by day of week
data["Day of Week"] = pd.Categorical(
data["Day of Week"], categories=list(calendar.day_name), ordered=True
)
ax = fig.add_subplot(spec[3, 0])
ax = countplot(
data,
"Day of Week",
ax,
label_fontsize=10,
flip_axis=True,
fontsize=fontsize,
)
# countplot by hour of day
data["Hour"] = pd.Categorical(
data["Hour"], categories=np.arange(0, 24, 1), ordered=True
)
ax = fig.add_subplot(spec[3, 1])
ax = countplot(
data, "Hour", ax, label_counts=False, flip_axis=True, fontsize=fontsize
)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=9)
plt.tight_layout()
if interactive:
plt.show()
return table, fig
def text_univariate_summary(
data: pd.DataFrame,
column: str,
fig_height: int = 6,
fig_width: int = 18,
fontsize: int = 15,
color_palette: Optional[str] = None,
top_ngrams: int = 10,
compute_ngrams: bool = True,
remove_punct: bool = True,
remove_stop: bool = True,
lower_case: bool = True,
interactive: bool = False,
) -> Tuple[pd.DataFrame, plt.Figure]:
"""
Creates a univariate EDA summary for a provided text variable column in a pandas DataFrame. Currently only
supports English.
For the provided column produces:
- histograms of token and character counts across entries
- boxplot of document frequencies
- countplots with top unigrams, bigrams, and trigrams
Args:
data: Dataset to perform EDA on
column: A string matching a column in the data
fig_height: Height of the plot in inches
fig_width: Width of the plot in inches
fontsize: Font size of axis and tick labels
color_palette: Seaborn color palette to use
top_ngrams: Maximum number of ngrams to plot for the top most frequent unigrams to trigrams
compute_ngrams: Whether to compute and display most common ngrams
remove_punct: Whether to remove punctuation during tokenization
remove_stop: Whether to remove stop words during tokenization
lower_case: Whether to lower case text for tokenization
interactive: Whether to display figures and tables in jupyter notebook for interactive use
Returns:
Tuple containing matplotlib Figure drawn and summary stats DataFrame
"""
from nltk import word_tokenize
from nltk.corpus import stopwords
if color_palette != "":
sns.set_palette(color_palette)
else:
sns.set_palette("tab10")
data = data.copy()
data = data.dropna(subset=[column])
# Compute number of characters per document
data["# Characters / Document"] = data[column].apply(lambda x: len(x))
# Tokenize the text
data["tokens"] = data[column].apply(lambda x: [w for w in word_tokenize(x)])
if lower_case:
data["tokens"] = data["tokens"].apply(lambda x: [w.lower() for w in x])
if remove_stop:
stop_words = set(stopwords.words("english"))
data["tokens"] = data["tokens"].apply(
lambda x: [w for w in x if w.lower() not in stop_words]
)
if remove_punct:
data["tokens"] = data["tokens"].apply(lambda x: [w for w in x if w.isalnum()])
data["# Tokens / Document"] = data["tokens"].apply(lambda x: len(x))
# Compute summary table
table = compute_univariate_summary_table(data, column, "categorical")
table["vocab_size"] = len(set([x for y in data["tokens"] for x in y]))
tokens_table = compute_univariate_summary_table(
data, "# Tokens / Document", "numeric"
)
char_table = compute_univariate_summary_table(
data, "# Characters / Document", "numeric"
)
table = | pd.concat([table, tokens_table, char_table], axis=0) | pandas.concat |
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.python_utils import assert_equal
from greykite.sklearn.estimator.null_model import DummyEstimator
from greykite.sklearn.estimator.prophet_estimator import ProphetEstimator
from greykite.sklearn.transform.column_selector import ColumnSelector
from greykite.sklearn.transform.null_transformer import NullTransformer
from greykite.sklearn.transform.pandas_feature_union import PandasFeatureUnion
from greykite.sklearn.transform.zscore_outlier_transformer import ZscoreOutlierTransformer
@pytest.fixture
def X():
"""dataset for test cases"""
size = 20
return pd.DataFrame({
TIME_COL: pd.date_range(start="2018-01-01", periods=size, freq="H"),
ACTUAL_COL: np.random.normal(scale=10, size=size),
VALUE_COL: np.random.normal(scale=10, size=size)
})
@pytest.fixture
def fs():
"""feature transformation pipeline for test cases"""
return PandasFeatureUnion([
("date", Pipeline([
("select_date", ColumnSelector([TIME_COL])) # leaves time column unmodified
])),
("response", Pipeline([ # applies outlier and null transformation to value column
("select_val", ColumnSelector([VALUE_COL])),
("outlier", ZscoreOutlierTransformer()),
("null", NullTransformer())
]))
])
def test_feature_union(X):
"""Tests PandasFeatureUnion on simple projection
Inspired by sklearn/tests/test_pipeline.py"""
# basic sanity check for feature union
select_value = ColumnSelector(column_names=[VALUE_COL])
select_actual = ColumnSelector(column_names=[ACTUAL_COL])
select_time = ColumnSelector(column_names=[TIME_COL])
fs = PandasFeatureUnion([("select_value", select_value),
("select_actual", select_actual),
("select_time", select_time),
("select_time_again", select_time)])
fs.fit(X)
X_transformed = fs.transform(X)
assert X_transformed.shape == (X.shape[0], 4)
# note that columns are selected in the order specified. There is no column renaming by default
assert np.all(X_transformed.columns.values == np.array([VALUE_COL, ACTUAL_COL, TIME_COL, TIME_COL]))
assert X_transformed.equals(pd.concat([
X[[VALUE_COL]],
X[[ACTUAL_COL]],
X[[TIME_COL]],
X[[TIME_COL]],
], axis=1))
def test_transformer_union(X, fs):
"""Tests PandasFeatureUnion on a pipeline of transformers, with custom parameters"""
# sets parameters and fits model
z_cutoff = 2.0
fs.set_params(response__outlier__z_cutoff=z_cutoff)
fs.fit(X)
X_transformed = fs.transform(X)
# checks shape
assert X_transformed.shape == (X.shape[0], 2)
assert list(X_transformed.columns) == [TIME_COL, VALUE_COL]
# checks output result
X_after_column_select = ColumnSelector([VALUE_COL]).fit_transform(X)
X_after_z_score = ZscoreOutlierTransformer(z_cutoff=z_cutoff).fit_transform(X_after_column_select)
X_after_null = NullTransformer().fit_transform(X_after_z_score)
assert_equal(X_transformed[TIME_COL], X[TIME_COL])
assert_equal(X_transformed[VALUE_COL], X_after_null[VALUE_COL])
def test_pipeline_union(X, fs):
"""Tests PandasFeatureUnion on a pipeline of transformers and estimator, and shows
that null model extracted from estimator in pipeline is equivalent to null model trained
directly"""
model_estimator = Pipeline([
("input", fs),
("estimator", ProphetEstimator(score_func=mean_squared_error,
coverage=0.80,
null_model_params={"strategy": "mean"}))
])
# fits pipeline with estimator, and extract dummy null model
z_cutoff = 2.0
model_estimator.set_params(input__response__outlier__z_cutoff=z_cutoff)
model_estimator.fit(X)
output_estimator_null = model_estimator.steps[-1][-1].null_model.predict(X)
# fits pipeline with dummy estimator
model_dummy = Pipeline([
("input", fs),
("dummy", DummyEstimator(score_func=mean_squared_error, strategy="mean"))
])
model_dummy.fit(X)
output_dummy = model_dummy.predict(X)
# fits dummy estimator by hand, without Pipeline
X_after_column_select = ColumnSelector([VALUE_COL]).fit_transform(X)
X_after_z_score = ZscoreOutlierTransformer(z_cutoff=z_cutoff).fit_transform(X_after_column_select)
X_after_null = NullTransformer().fit_transform(X_after_z_score)
X_after_union = | pd.concat([X[TIME_COL], X_after_null], axis=1) | pandas.concat |
import os
from pathlib import Path
import sys
from time import strptime
import path_config
import requests
from bs4 import BeautifulSoup
import pandas as pd
class EspnTournament():
def __init__(self) -> None:
self.tournament_info = {
"tournament_id":"",
"tournament_name":"",
"tournament_date":"",
"tournament_purse":"",
"win_total":"",
"tournament_size":"",
"winner_name":"",
"winner_id":"",
"season_id":"",
}
def __getitem__(self, i):
return self.tournament_info[i]
def set_all_w(self, w_name, w_id, w_total):
self.tournament_info["winner_name"] = w_name
self.tournament_info["winner_id"] = w_id
self.tournament_info["win_total"] = w_total
def set_all_missing(self):
self.tournament_info["win_total"] = None
self.tournament_info["tournament_size"] = None
self.tournament_info["winner_name"] = None
self.tournament_info["winner_id"] = None
def get_tournament_id(self):
return self.tournament_info["tournament_id"]
def set_tournament_id(self, url):
"""Set tournament id from a url.
Parameters
----------
url : str
ESPN tournament url.
Examples
--------
>>> espn_t = EspnTournament()
>>> t_url = "https://www.espn.com/golf/leaderboard?tournamentId=3802"
>>> espn_t.set_tournament_id(t_url)
"""
t_id = url[url.rfind("=") + 1:]
self.tournament_info["tournament_id"] = t_id
def get_tournament_name(self):
return self.tournament_info["tournament_name"]
def set_tournament_name(self, tourn_meta):
"""Set tournament name from a tournament meta.
Parameters
----------
        tourn_meta : element.Tag
child of Leaderboard__Header class to find tournament name.
Examples
--------
>>> espn_t = EspnTournament()
        >>> espn_t.set_tournament_name(tourn_meta)
"""
tourn_name = tourn_meta.find("h1").text
self.tournament_info["tournament_name"] = tourn_name
def parse_espn_dates(self, date, identifier, b_identifier=True):
"""Parse for subset date of the original date
Parameters
----------
date : str
ESPN tournament date to parse.
identifier : str
Identifier to be searched for.
b_identifier : bool, optional
Flag to tell where subset search begins.
Returns
-------
str
Parsed ESPN date.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.parse_espn_dates("Oct 5-8 2018", "-")
"Oct 5"
"""
if b_identifier:
if date.find(identifier) != -1:
b_idx = date.find(identifier)
# Should return month
n_date = date[:b_idx].rstrip()
return n_date
else:
# special case of only one date in link
b_idx = date.find(",")
n_date = date[:b_idx]
return n_date
else:
if date.find(identifier) != -1:
a_idx = date.find(identifier)
# Should return day
return date[a_idx: ]
else:
print("Did not find identifier in string for: ", date)
def date_parser(self, date):
"""Reformat ESPN tournament date.
Parameters
----------
date : str
Date to parse.
Returns
-------
str
Reformatted ESPN date.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.date_parser("Oct 5-8 2018")
"10/5/2018"
"""
year = date[date.rfind(" ")+1:]
month_and_day = self.parse_espn_dates(date, "-")
day = self.parse_espn_dates(month_and_day, " ", b_identifier=False)
day = day.lstrip()
month = self.parse_espn_dates(month_and_day, " ", b_identifier=True)
month_abr = month[:3]
month_number = strptime(month_abr, "%b").tm_mon
date_str = str(month_number) + "/" + day + "/" + year
return date_str
def get_date(self):
return self.tournament_info["tournament_date"]
def set_date(self, tourn_meta):
"""Set tournament date from a tournament meta.
Parameters
----------
tourn_meta : element.Tag
child of Leaderboard__Header class.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_date(tourn_meta)
"""
tourn_date = tourn_meta.find("span").text
t_date = self.date_parser(tourn_date)
self.tournament_info["tournament_date"] = t_date
def get_tournament_purse(self):
return self.tournament_info["tournament_purse"]
def set_tournament_purse(self, tourn_header):
"""Set tournament purse from a tournament header.
Parameters
----------
tourn_header : element.Tag
Leaderboard__Header class.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_purse(tourn_header)
"""
purse_class = tourn_header.find("div", class_="n7 clr-gray-04").text
# string find method
purse_start = purse_class.find("$") + 1
if purse_class.find("D") != -1:
purse_end = purse_class.find("D")
purse = purse_class[purse_start:purse_end]
else:
purse = purse_class[purse_start:]
purse = purse.replace(",", "")
self.tournament_info["tournament_purse"] = purse
def get_winning_score(self):
return self.tournament_info["win_total"]
def set_winning_score(self, t_body):
"""Set winning score total from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winning_score(t_body)
"""
# tournament winner's total's data
tourn_totals = t_body.find("td", class_="Table__TD")
if tourn_totals:
totals = tourn_totals.find_next_siblings()
if len(totals) == 9:
# selects 4 round (72 hole) total
total = totals[-3].text
self.tournament_info["win_total"] = total
else:
total = totals[-3].text
if len(total) == 0:
self.tournament_info["win_total"] = None
else:
self.tournament_info["win_total"] = total
def get_tournament_size(self):
return self.tournament_info["tournament_size"]
def set_tournament_size(self, t_body):
"""Set tournament size from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_size(t_body)
"""
players = t_body.find_all("tr", class_="Table__TR Table__even")
if players is not None:
num_players = len(players)
self.tournament_info["tournament_size"] = num_players
def get_winner_name(self):
return self.tournament_info["winner_name"]
def set_winner_name(self, t_body):
"""Set winner name from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winner_name(t_body)
"""
winner = t_body.find("a")
if winner:
name = winner.text
self.tournament_info["winner_name"] = name
else:
self.tournament_info["winner_name"] = None
def get_winner_id(self):
return self.tournament_info["winner_id"]
def set_winner_id(self, t_body):
"""Set winner id from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winner_id(t_body)
"""
winner = t_body.find("a")
if winner:
winner_id = winner["href"]
# substring start and end indexes
start_winner = winner_id.find("id/") + 3
end_winner = winner_id.rfind("/")
id = winner_id[start_winner:end_winner]
self.tournament_info["winner_id"] = id
else:
self.tournament_info["winner_id"] = None
def get_season_id(self):
return self.tournament_info["season_id"]
def set_season_id(self, s_id):
"""Set season identifier from s_id.
Parameters
----------
s_id : int
Season identifier to set.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_season_id(2018)
"""
self.tournament_info["season_id"] = s_id
class EspnSeason():
def __init__(self, start, end=None) -> None:
b_url = "https://www.espn.com/golf/schedule/_/season/"
if end is not None:
season_urls = [b_url + str(season) for season in range(start, end+1)]
self.end = end
else:
season_urls = [f"{b_url}{start}"]
self.end = None
self.start = start
self.season_urls = season_urls
self.season_data = []
def retrieve_tournament_info(self, t_url, s_id):
"""Retrieve tournament information from tournament url and season id.
Parameters
----------
t_url : str
Tournament url to extract information.
s_id : int
Season identifier.
Examples
--------
        >>> espn_s = EspnSeason(2017)
        >>> tournament_url = "https://www.espn.com/golf/leaderboard?tournamentId=3802"
        >>> espn_s.retrieve_tournament_info(tournament_url, 2017)
"""
espn_t = EspnTournament()
with requests.Session() as session:
page = session.get(t_url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, "html.parser")
header = soup.find("div", class_="Leaderboard__Header")
mt4 = header.find_all("div", class_="mt4")
tourn_meta = mt4[-1]
espn_t.set_tournament_id(t_url)
espn_t.set_tournament_name(tourn_meta)
espn_t.set_date(tourn_meta)
espn_t.set_tournament_purse(header)
                # Tables on the webpage; index with -1 in case of a playoff table
tourn_tables = soup.select("div.ResponsiveTable")
if tourn_tables:
# win_total, tournamnet_size, winner_name, winner_id
tourn_table = tourn_tables[-1]
tourn_body = tourn_table.find("tbody", class_="Table__TBODY")
espn_t.set_winning_score(tourn_body)
espn_t.set_tournament_size(tourn_body)
espn_t.set_winner_name(tourn_body)
espn_t.set_winner_id(tourn_body)
espn_t.set_season_id(s_id)
if espn_t.get_tournament_id() == "2277":
espn_t.set_all_w("<NAME>", "1037", "265")
else:
print(f"No div.ResponsiveTable, (Tournament {espn_t.get_tournament_id()} Cancelled)")
espn_t.set_all_missing()
espn_t.set_season_id(s_id)
self.season_data.append(espn_t)
def retrieve_season(self, season_url):
"""Retrieve season from season url.
Parameters
----------
season_url : str
Season url to extract information.
Examples
--------
>>> espn_s = EspnSeason(2018)
>>> season_url = "https://www.espn.com/golf/schedule/_/season/2018"
>>> espn_s.retrieve_season(season_url)
"""
with requests.Session() as session:
page = session.get(season_url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, "html.parser")
season_table = soup.select("div.ResponsiveTable")
                if season_table:
season_body = season_table[0].find("tbody", class_="Table__TBODY")
tournaments = season_body.find_all("div", class_="eventAndLocation__innerCell")
if tournaments is not None:
for tournament in tournaments:
tournament_url = tournament.find("a")
if tournament_url:
t_url = tournament_url["href"]
print(f"Fetching {t_url} data")
season_id = season_url[season_url.rfind("/")+1 :]
self.retrieve_tournament_info(t_url, season_id)
else:
print(f"Error retrieving page. page status code: {page.status_code}")
def retrieve_all_seasons(self):
"""Retrieve all seasons set from constructor.
Examples
--------
>>> espn_s = EspnSeason(2018)
>>> espn_s.retrieve_all_seasons()
"""
for season in self.season_urls:
self.retrieve_season(season)
def feed_season_data(self):
"""Feed all season data held.
Returns
-------
pd.DataFrame
Season data in dataframe.
Examples
--------
>>> e_season = EspnSeason(2018)
>>> e_season.retrieve_all_seasons()
>>> df = e_season.feed_season_data()
"""
if self.season_data is not None:
data = [tournament.tournament_info for tournament in self.season_data]
            df = pd.DataFrame(data)
            return df
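# Illustrative usage (a sketch, not part of the original script): scrape the
# 2017-2018 seasons end-to-end and write the combined results to a CSV file.
# The output filename is an assumption.
if __name__ == "__main__":
    espn_season = EspnSeason(2017, 2018)
    espn_season.retrieve_all_seasons()
    season_df = espn_season.feed_season_data()
    season_df.to_csv("espn_tournaments_2017_2018.csv", index=False)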
import pandas as pd
import requests
import streamlit as st
from requests.exceptions import HTTPError
from .base import Component
class DataSelector(Component):
def __init__(self, *args):
super(DataSelector, self).__init__(*args)
self.url_app = self.API_URL + '/applications/'
self.query = dict(limit=10, offset=0)
@property
def __occupation_type_choices(self):
"""Get available options for choice field occupation_type."""
json_res = self.__fetch(None, method='OPTIONS')
occupation_type = json_res['actions']['POST']['occupation_type']
return [None] + [x['value'] for x in occupation_type['choices']]
@property
def __credit_type_choices(self):
"""Get available options for choice field name_contract_type."""
json_res = self.__fetch(None, method='OPTIONS')
credit_type = json_res['actions']['POST']['name_contract_type']
return [None] + [x['value'] for x in credit_type['choices']]
def __fetch(self, query, url=None, method='GET'):
"""Fetch data from API.
args::
* query (dict) : params that be passed as query in url.
kwargs::
* url (str) : url that you want to fetch. default is None make
a request to self.url_app.
* method (str): Http that you want to use. default is GET.
should be one of GET or OPTIONS
"""
if method == 'GET':
response = requests.get(url if url else self.url_app,
headers=self.headers,
params=query)
# get available options for an entry point
elif method == 'OPTIONS':
response = requests.options(self.url_app, headers=self.headers)
if response.status_code != 200:
raise HTTPError(response.status_code)
return response.json()
def form(self):
        # Fetch the first and last records to set the slider's boundaries
first = self.__fetch(None,
url=self.url_app+ 'first_record')['sk_id_curr']
last = self.__fetch(None,
url=self.url_app + 'last_record')['sk_id_curr']
id = st.slider('Numéro de dossier',
min_value=first,
max_value=last,
value=(first, last))
        # Use the tuple returned by the slider to filter applications
self.query['min_sk_id_curr'], self.query['max_sk_id_curr'] = id
age = st.slider('Age', min_value=18,
max_value=100, value=(18, 100))
        # Use the tuple returned by the age slider to filter applications
self.query['min_days_birth'], self.query['max_days_birth'] = age
occupation_type = st.selectbox('Profession',
options=self.__occupation_type_choices)
# Filter applications by occupation_type
self.query['occupation_type'] = occupation_type
# Filter application by contract name
name_contract_type = st.selectbox('Type de contrat',
options=self.__credit_type_choices)
self.query['name_contract_type'] = name_contract_type
# finally fetch corresponding applications
res = self.__fetch(self.query)
# Display the number of applications that match to filters
st.markdown(f"### Nombre de demandes correspondant à votre requête {res['count']}")
        # Add a number input that allows us to reload applications for the next
        # or previous page. The API returns paginated applications for
        # performance reasons
page_slot = st.empty()
page = page_slot.number_input('page', min_value=0,
max_value=res['count'] // 10,
value=0)
# if page is not null, increase the offset to get the right page
if page:
self.query['offset'] = page * 10
res = self.__fetch(self.query)
slot = st.empty()
        results = pd.DataFrame(res['results'])
import psycopg2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
from wordcloud import WordCloud, ImageColorGenerator
from sklearn.feature_extraction import text
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.preprocessing import MinMaxScaler
from psycopg2.extensions import register_adapter, AsIs
from PIL import Image
topic_names = [
'climate change strategy',
'water efficiency',
'energy efficiency',
'carbon intensity',
'environmental management system',
'equal opportunities',
'human rights',
'customer responsibility',
'health and safety',
'support community',
'business ethics',
'compliance',
'shareholder democracy',
'socially responsible',
'executive compensation'
]
def get_dataframe_from_database(conn):
select = '''
SELECT name, r.statement, r.lemma FROM reports r
LEFT JOIN company c ON c.ticker = r.company_id;
'''
return pd.io.sql.read_sql_query(select,conn)
def get_stop_words(df):
"""
    Remove specific keywords and company names, as well as
    English stop words, so they are not used in topic modelling.
"""
sp_stop_words = [
'plc', 'group', 'target',
'track', 'capital', 'holding',
'annualreport', 'esg', 'bank',
'report','long', 'make',
'table','content', 'wells', 'fargo', 'nxp',
'letter', 'ceo', 'about', 'united', 'states', 'scope'
]
for name in df.name.unique():
for word in name.split():
sp_stop_words.append(word.lower())
return text.ENGLISH_STOP_WORDS.union(sp_stop_words)
def corpus_wide_term_frequencies(df, stop_words,image_file):
"""
    Create a word cloud for the whole corpus displaying the most frequent terms,
    using a given background image.
"""
large_string = ' '.join(df.lemma)
mask = np.array(Image.open(image_file))
font = 'arial.ttf'
colors = ImageColorGenerator(mask)
word_cloud = WordCloud(font_path = font,
background_color="#effbf9",
mask = mask,
max_words=5000,
width=900,
height=700,
stopwords=stop_words,
color_func=colors,
contour_width=1,
contour_color='white'
)
plt.figure(figsize=(20,20))
word_cloud.generate(large_string)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis("off")
return plt
def bigram_analysis(df, stop_words):
"""
using Tf-Idf vectorization to find the most frequent bigrams in the corpus
"""
bigram_tf_idf_vectorizer = text.TfidfVectorizer(stop_words=stop_words, ngram_range=(2,2), min_df=10, use_idf=True)
bigram_tf_idf = bigram_tf_idf_vectorizer.fit_transform(df.lemma)
words = bigram_tf_idf_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in bigram_tf_idf:
total_counts += t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x:x[1], reverse=True)[0:10]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(figsize=(15, 5))
plt.subplot(title='10 most common bi-grams')
sns.barplot(x_pos, counts, color = '#E9C46A') #palette='crest'
plt.xticks(x_pos, words, rotation=45)
plt.xlabel('bi-grams')
plt.ylabel('tfidf')
ax = plt.gca()
ax.set_facecolor('None')
plt.rcParams['figure.facecolor'] = 'None'
return plt
def top_words(model, feature_names, n_top_words):
rows = []
for topic_idx, topic in enumerate(model.components_):
message = ", ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])
rows.append(["Topic #%d: " % (topic_idx + 1), message])
return pd.DataFrame(rows, columns=['topic', 'keywords'])
def topic_modelling(df, stop_words, n_components=5):
"""
    Extract topics from the corpus using a Latent Dirichlet Allocation model.
"""
word_tf_vectorizer = text.CountVectorizer(stop_words=stop_words, ngram_range=(1,1))
word_tf = word_tf_vectorizer.fit_transform(df.lemma)
lda = LDA(random_state=42, n_components=n_components,learning_decay=0.3)
lda.fit(word_tf)
tf_feature_names = word_tf_vectorizer.get_feature_names()
return lda,tf_feature_names,word_tf
def word_cloud(model, tf_feature_names, index):
imp_words_topic=""
comp = model.components_[index]
vocab_comp = zip(tf_feature_names, comp)
sorted_words = sorted(vocab_comp, key = lambda x:x[1], reverse=True)[:50]
for word in sorted_words:
imp_words_topic = imp_words_topic + " " + word[0]
return WordCloud(
background_color="white",
width=600,
height=600,
contour_width=3,
contour_color='steelblue'
).generate(imp_words_topic)
def display_topics(lda, tf_feature_names):
topics = len(lda.components_)
fig = plt.figure(figsize=(20, 20 * topics / 5))
for i, topic in enumerate(lda.components_):
ax = fig.add_subplot(topics, 3, i + 1)
ax.set_title(topic_names[i], fontsize=20)
wordcloud = word_cloud(lda, tf_feature_names, i)
ax.imshow(wordcloud)
ax.set_facecolor('None')
ax.axis('off')
return plt
def attach_topic_distribution(df, lda,word_tf):
transformed = lda.transform(word_tf)
a = [np.argmax(distribution) for distribution in transformed]
b = [np.max(distribution) for distribution in transformed]
df2 = pd.DataFrame(zip(a,b,transformed), columns=['topic', 'probability', 'probabilities'])
return pd.concat([df, df2], axis=1)
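# A sketch (not part of the original pipeline) showing how the functions above
# could be chained together; `conn` is assumed to be an open psycopg2 connection
# and the helper name is made up.
def run_topic_pipeline(conn, n_components=15):
    df = get_dataframe_from_database(conn)
    stop_words = get_stop_words(df)
    lda, tf_feature_names, word_tf = topic_modelling(df, stop_words, n_components=n_components)
    df = attach_topic_distribution(df, lda, word_tf)
    return lda, tf_feature_names, df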
def clear_database(conn):
cursor = conn.cursor()
cursor.execute('''DELETE FROM topics;''')
conn.commit()
cursor.close()
return
def insert_topic_names(conn, topics):
cursor = conn.cursor()
for topic in topics:
cursor.execute('''INSERT INTO topics(topic) VALUES (%s);''', (topic,))
conn.commit()
cursor.close()
return
def update_database(conn,df):
cursor = conn.cursor()
create = '''CREATE TABLE IF NOT EXISTS reports_alt (
id SERIAL PRIMARY KEY,
company_id VARCHAR(5) NOT NULL,
statement TEXT,
lemma TEXT,
topic INTEGER,
probability NUMERIC,
probabilities NUMERIC[],
FOREIGN KEY (company_id)
REFERENCES company(ticker)
ON UPDATE CASCADE ON DELETE CASCADE
);'''
insert = '''
INSERT INTO reports_alt (company_id, statement, lemma, topic, probability, probabilities)
VALUES ((SELECT ticker FROM company WHERE LOWER(company.name) LIKE LOWER(%(name)s) LIMIT 1),
%(statement)s, %(lemma)s, %(topic)s, %(probability)s, ARRAY[%(probabilities)s]);
'''
drop = '''DROP TABLE reports;'''
alter = '''ALTER TABLE reports_alt RENAME TO reports;'''
cursor.execute(create)
for record in df.to_dict('records'):
cursor.mogrify(insert, record)
cursor.execute(insert, record)
cursor.execute(drop)
cursor.execute(alter)
conn.commit()
cursor.close()
return
def adapt_numpy_array(numpy_array):
list = numpy_array.tolist()
return AsIs(list)
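# The adapter above only takes effect once it is registered with psycopg2; the
# registration call below is shown as a sketch of the typical wiring.
register_adapter(np.ndarray, adapt_numpy_array)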
def compare_core_initiatives(df):
    esg_focus = pd.crosstab(df.name, df.topic)
#!/usr/bin/env python
# coding: utf-8
# # Introduction
#
# Previously I built XG Boost models to predict the main and sub-types of Pokemon from all 7 generations (https://www.kaggle.com/xagor1/pokemon-type-predictions-using-xgb). This was relatively successful, but often stalled at around 70% accuracy per generation, with some much worse. To gain more experience with parameter tuning and feature engineering, I decided to revisit just the 1st Generation, and see if I could improve my results.
# In[2]:
#Load various packages
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import time
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
import xgboost as xgb
from xgboost import plot_importance
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn import metrics
import seaborn as sns
print(os.listdir("../../../input/rounakbanik_pokemon"))
from sklearn.feature_selection import SelectFromModel
from collections import Counter
import warnings
warnings.filterwarnings("ignore")
# # Loading and Modifying Data
#
# To start with, I loaded and modified the data as in the previous kernel.
#
# In contrast to last time, I separated out the numerical and categorical data, and applied one-hot encoding to the latter. This caused the number of features to explode from 24 to 500.
#
# The original plan was to do feature engineering to improve my overall accuracy. However, thus far all my attempts have actually made the predictions worse, so I have left this aside for now.
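# A minimal sketch (not from the original notebook) of why one-hot encoding
# inflates the column count: each categorical value becomes its own 0/1 column.
# The toy column below is made up for illustration.
_ohe_demo = pd.DataFrame({"egg_group_1": ["monster", "water1", "bug"]})
print(pd.get_dummies(_ohe_demo, columns=["egg_group_1"]).columns.tolist())
# ['egg_group_1_bug', 'egg_group_1_monster', 'egg_group_1_water1']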
# In[3]:
#Read data
path = "../../../input/rounakbanik_pokemon/"
egg_df=pd.read_csv(path+"pokemon.csv")
species_df=pd.read_csv(path+"pokemon.csv")
abilities_df=pd.read_csv(path+"pokemon.csv")
#Split duplicates off & combine back
egg2_df=pd.DataFrame.copy(egg_df)
egg2_df=egg_df.loc[egg_df['species_id'].duplicated(), :]
egg_df.drop_duplicates('species_id',inplace=True)
merged = egg_df.merge(egg2_df,on="species_id",how='outer')
merged.fillna(0,inplace=True)
#Rename columns to simpler form.
merged.rename(index=str,columns={"egg_group_id_x":"egg_group_1"},inplace=True)
merged.rename(index=str,columns={"egg_group_id_y":"egg_group_2"},inplace=True)
#Drop last 6 columns
merged.drop(merged.tail(6).index,inplace=True)
#Rename
merged.rename(index=str,columns={"species_id":"pokedex_number"},inplace=True)
#Make a new smaller dataframe
species_trim_df=pd.DataFrame()
species_trim_df["pokedex_number"]=species_df['id']
species_trim_df["color_id"]=species_df['color_id']
species_trim_df["shape_id"]=species_df['shape_id']
species_trim_df["habitat_id"]=species_df['habitat_id']
species_trim_df.drop(species_trim_df.tail(6).index,inplace=True)
#Trim all below Magearna off
abilities_df = abilities_df[abilities_df.pokemon_id < 802]
#Make 3 new columns
abilities_df["Ability1"]=0
abilities_df["Ability2"]=0
abilities_df["Ability3"]=0
#Assign values to the 3 columns based on the ability slot (1-3)
abilities_df["Ability1"] = abilities_df.ability_id.where(abilities_df.slot == 1,0)
abilities_df["Ability2"] = abilities_df.ability_id.where(abilities_df.slot == 2,0)
abilities_df["Ability3"] = abilities_df.ability_id.where(abilities_df.slot == 3,0)
#Split duplicates off into new dataframes
#3 abilities on some means it needs to be split twice
#I'm sure there's an easier way to do this
abilities_df2=pd.DataFrame.copy(abilities_df)
abilities_df2=abilities_df.loc[abilities_df['pokemon_id'].duplicated(), :]
abilities_df.drop_duplicates('pokemon_id',inplace=True)
abilities_df3=pd.DataFrame.copy(abilities_df2)
abilities_df3=abilities_df2.loc[abilities_df2['pokemon_id'].duplicated(), :]
abilities_df2.drop_duplicates('pokemon_id',inplace=True)
#Drop extra columns
abilities_df.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
abilities_df2.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
abilities_df3.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
#Combine everything back
abilities_df=abilities_df.set_index('pokemon_id').add(abilities_df2.set_index('pokemon_id'),fill_value=0).reset_index()
abilities_df=abilities_df.set_index('pokemon_id').add(abilities_df3.set_index('pokemon_id'),fill_value=0).reset_index()
#Rename pokemon_id to pokedex number to allow for merging.
abilities_df.rename(index=str,columns={"pokemon_id":"pokedex_number"},inplace=True)
#Read Kaggle data
path = "../../../input/rounakbanik_pokemon/"
pokemon_df=pd.read_csv(path+"pokemon.csv")
Name_df=pd.DataFrame()
Name_df["name"]=pokemon_df["name"].copy()
#Fix Minior's capture rate
pokemon_df.capture_rate.iloc[773]=30
#Change the type
pokemon_df['capture_rate']=pokemon_df['capture_rate'].astype(str).astype(int)
#Merge all my data.
pokemon_df=pokemon_df.merge(merged,on="pokedex_number",how='outer')
pokemon_df=pokemon_df.merge(species_trim_df,on="pokedex_number",how='outer')
pokemon_df=pokemon_df.merge(abilities_df,on="pokedex_number",how='outer')
#Remove against columns
pokemon_df.drop(list(pokemon_df.filter(regex = 'against')), axis = 1, inplace = True)
#Correct the spelling error
pokemon_df.rename(index=str,columns={"classfication":"classification"},inplace=True)
#Change nan to 'none'
pokemon_df.type2.replace(np.NaN, 'none', inplace=True)
#Drop Pokedex number for now
pokemon_df.drop("pokedex_number",axis=1,inplace=True)
pokemon_df.drop("generation",axis=1,inplace=True)
#First find the NAs.
index_height = pokemon_df['height_m'].index[pokemon_df['height_m'].apply(np.isnan)]
index_weight = pokemon_df['weight_kg'].index[pokemon_df['weight_kg'].apply(np.isnan)]
index_male = pokemon_df['percentage_male'].index[pokemon_df['percentage_male'].apply(np.isnan)]
#Manually replace the missing heights & weights using the Kanto version etc
pokemon_df.height_m.iloc[18]=0.3
pokemon_df.height_m.iloc[19]=0.7
pokemon_df.height_m.iloc[25]=0.8
pokemon_df.height_m.iloc[26]=0.6
pokemon_df.height_m.iloc[27]=1.0
pokemon_df.height_m.iloc[36]=0.6
pokemon_df.height_m.iloc[37]=1.1
pokemon_df.height_m.iloc[49]=0.2
pokemon_df.height_m.iloc[50]=0.7
pokemon_df.height_m.iloc[51]=0.4
pokemon_df.height_m.iloc[52]=1.0
pokemon_df.height_m.iloc[73]=0.4
pokemon_df.height_m.iloc[74]=1.0
pokemon_df.height_m.iloc[75]=1.4
pokemon_df.height_m.iloc[87]=0.9
pokemon_df.height_m.iloc[88]=1.2
pokemon_df.height_m.iloc[102]=2.0
pokemon_df.height_m.iloc[104]=1.0
pokemon_df.height_m.iloc[719]=0.5
pokemon_df.height_m.iloc[744]=0.8
pokemon_df.weight_kg.iloc[18]=3.5
pokemon_df.weight_kg.iloc[19]=18.5
pokemon_df.weight_kg.iloc[25]=30.0
pokemon_df.weight_kg.iloc[26]=12.0
pokemon_df.weight_kg.iloc[27]=29.5
pokemon_df.weight_kg.iloc[36]=9.9
pokemon_df.weight_kg.iloc[37]=19.9
pokemon_df.weight_kg.iloc[49]=0.8
pokemon_df.weight_kg.iloc[50]=33.3
pokemon_df.weight_kg.iloc[51]=4.2
pokemon_df.weight_kg.iloc[52]=32.0
pokemon_df.weight_kg.iloc[73]=20.0
pokemon_df.weight_kg.iloc[74]=105.0
pokemon_df.weight_kg.iloc[75]=300.0
pokemon_df.weight_kg.iloc[87]=30.0
pokemon_df.weight_kg.iloc[88]=30.0
pokemon_df.weight_kg.iloc[102]=120.0
pokemon_df.weight_kg.iloc[104]=45.0
pokemon_df.weight_kg.iloc[719]=9.0
pokemon_df.weight_kg.iloc[744]=25.0
#Create a Genderless column to separate them from the all-female cases.
pokemon_df["Genderless"]=0
pokemon_df["Genderless"].loc[list(index_male)]=1
#Replace all the NANs with zeros in the % male
pokemon_df.percentage_male.replace(np.NaN, 0, inplace=True)
#Check the typings of the pokemon with Alolan forms & fix
#I'm sure this can be done much more elegantly
pokemon_df.type2.iloc[18]='none'
pokemon_df.type2.iloc[19]='none'
pokemon_df.type2.iloc[25]='none'
pokemon_df.type2.iloc[26]='none'
pokemon_df.type2.iloc[27]='none'
pokemon_df.type2.iloc[36]='none'
pokemon_df.type2.iloc[37]='none'
pokemon_df.type2.iloc[49]='none'
pokemon_df.type2.iloc[50]='none'
pokemon_df.type2.iloc[51]='none'
pokemon_df.type2.iloc[52]='none'
pokemon_df.type2.iloc[87]='none'
pokemon_df.type2.iloc[88]='none'
pokemon_df.type2.iloc[104]='none'
#Lets start with just the numerical data for now.
num_features=pokemon_df.select_dtypes(include=np.number)
num_features=num_features.columns
#print("The Type models will be built using the following features")
#print(list(num_features))
# In[4]:
numerical_df=pd.DataFrame.copy(pokemon_df[['attack', 'base_egg_steps', 'base_happiness', 'base_total','capture_rate', 'defense', 'experience_growth','height_m', 'hp', 'percentage_male', 'sp_attack', 'sp_defense', 'speed','weight_kg']])
numerical_df.to_csv('numerical_features.csv',index=False)
one_hot_df=pd.DataFrame.copy(pokemon_df[["Ability1","Ability2","Ability3","egg_group_1","egg_group_2","is_legendary","color_id","shape_id","habitat_id","Genderless"]])
one_hot_df=pd.get_dummies(one_hot_df,prefix=["Ability1","Ability2","Ability3","egg_group_1","egg_group_2","is_legendary","color_id","shape_id","habitat_id","Genderless"],columns=["Ability1","Ability2","Ability3","egg_group_1","egg_group_2","is_legendary","color_id","shape_id","habitat_id","Genderless"])
one_hot_df.to_csv('one_hot_features.csv',index=False)
features=pd.concat([numerical_df,one_hot_df],axis=1)
# In[ ]:
#Do some feature engineering
#features["Total_Offense"]=features["attack"]+features["sp_attack"]
#features["Total_Defense"]=features["defense"]+features["sp_defense"]
#features["Total_Physical"]=features["attack"]+features["defense"]
#features["Total_Special"]=features["sp_attack"]+features["sp_defense"]
#features["Attack_Difference"]=abs(features["attack"]-features["sp_attack"])
#features["Defense_Difference"]=abs(features["defense"]-features["sp_defense"])
#features["Physical_Difference"]=abs(features["attack"]-features["defense"])
#features["Special_Difference"]=abs(features["sp_attack"]-features["sp_defense"])
#features["HeightXWeight"]=features["height_m"]*features["weight_kg"]
#features["BMI"]=features["weight_kg"]/(features["weight_kg"]**2)
#features["Speed_X_Weight"]=features["speed"]*features["weight_kg"]
#features=features.drop(columns=["attack","sp_attack"])
# In[5]:
targets=pd.DataFrame()
targets2=pd.DataFrame()
targets["type1"]=pokemon_df["type1"]
targets=np.ravel(targets)
targets2["type2"]=pokemon_df["type2"]
targets2=np.ravel(targets2)
#Split features & targets into each generation.
Gen1_features=features[0:151]
Gen2_features=features[151:251]
Gen3_features=features[251:386]
Gen4_features=features[386:493]
Gen5_features=features[493:649]
Gen6_features=features[649:721]
Gen7_features=features[721:801]
Gen1_targets=targets[0:151]
Gen2_targets=targets[151:251]
Gen3_targets=targets[251:386]
Gen4_targets=targets[386:493]
Gen5_targets=targets[493:649]
Gen6_targets=targets[649:721]
Gen7_targets=targets[721:801]
Gen1_targets=np.ravel(Gen1_targets)
Gen2_targets=np.ravel(Gen2_targets)
Gen3_targets=np.ravel(Gen3_targets)
Gen4_targets=np.ravel(Gen4_targets)
Gen5_targets=np.ravel(Gen5_targets)
Gen6_targets=np.ravel(Gen6_targets)
Gen7_targets=np.ravel(Gen7_targets)
#Recombine 6 of them, in 7 different ways, to make my different training sets
#Ordering of the features & targets should be the same!
#But doesn't have to be necessarily in numerical order
Gens_not1_features=pd.concat([Gen2_features,Gen3_features,Gen4_features,Gen5_features,Gen6_features,Gen7_features],axis=0)
Gens_not2_features=pd.concat([Gen1_features,Gen3_features,Gen4_features,Gen5_features,Gen6_features,Gen7_features],axis=0)
Gens_not3_features=pd.concat([Gen2_features,Gen1_features,Gen4_features,Gen5_features,Gen6_features,Gen7_features],axis=0)
Gens_not4_features=pd.concat([Gen2_features,Gen3_features,Gen1_features,Gen5_features,Gen6_features,Gen7_features],axis=0)
Gens_not5_features=pd.concat([Gen2_features,Gen3_features,Gen4_features,Gen1_features,Gen6_features,Gen7_features],axis=0)
Gens_not6_features=pd.concat([Gen2_features,Gen3_features,Gen4_features,Gen5_features,Gen1_features,Gen7_features],axis=0)
Gens_not7_features=pd.concat([Gen2_features,Gen3_features,Gen4_features,Gen5_features,Gen6_features,Gen1_features],axis=0)
Gens_not1_targets=np.concatenate((Gen2_targets,Gen3_targets,Gen4_targets,Gen5_targets,Gen6_targets,Gen7_targets),axis=0)
Gens_not2_targets=np.concatenate((Gen1_targets,Gen3_targets,Gen4_targets,Gen5_targets,Gen6_targets,Gen7_targets),axis=0)
Gens_not3_targets=np.concatenate((Gen2_targets,Gen1_targets,Gen4_targets,Gen5_targets,Gen6_targets,Gen7_targets),axis=0)
Gens_not4_targets=np.concatenate((Gen2_targets,Gen3_targets,Gen1_targets,Gen5_targets,Gen6_targets,Gen7_targets),axis=0)
Gens_not5_targets=np.concatenate((Gen2_targets,Gen3_targets,Gen4_targets,Gen1_targets,Gen6_targets,Gen7_targets),axis=0)
Gens_not6_targets=np.concatenate((Gen2_targets,Gen3_targets,Gen4_targets,Gen5_targets,Gen1_targets,Gen7_targets),axis=0)
Gens_not7_targets=np.concatenate((Gen2_targets,Gen3_targets,Gen4_targets,Gen5_targets,Gen6_targets,Gen1_targets),axis=0)
Gen1_targets2=targets2[0:151]
Gen2_targets2=targets2[151:251]
Gen3_targets2=targets2[251:386]
Gen4_targets2=targets2[386:493]
Gen5_targets2=targets2[493:649]
Gen6_targets2=targets2[649:721]
Gen7_targets2=targets2[721:801]
Gen1_targets2=np.ravel(Gen1_targets2)
Gen2_targets2=np.ravel(Gen2_targets2)
Gen3_targets2=np.ravel(Gen3_targets2)
Gen4_targets2=np.ravel(Gen4_targets2)
Gen5_targets2=np.ravel(Gen5_targets2)
Gen6_targets2=np.ravel(Gen6_targets2)
Gen7_targets2=np.ravel(Gen7_targets2)
Gens_not1_targets2=np.concatenate((Gen2_targets2,Gen3_targets2,Gen4_targets2,Gen5_targets2,Gen6_targets2,Gen7_targets2),axis=0)
Gens_not2_targets2=np.concatenate((Gen1_targets2,Gen3_targets2,Gen4_targets2,Gen5_targets2,Gen6_targets2,Gen7_targets2),axis=0)
Gens_not3_targets2=np.concatenate((Gen2_targets2,Gen1_targets2,Gen4_targets2,Gen5_targets2,Gen6_targets2,Gen7_targets2),axis=0)
Gens_not4_targets2=np.concatenate((Gen2_targets2,Gen3_targets2,Gen1_targets2,Gen5_targets2,Gen6_targets2,Gen7_targets2),axis=0)
Gens_not5_targets2=np.concatenate((Gen2_targets2,Gen3_targets2,Gen4_targets2,Gen1_targets2,Gen6_targets2,Gen7_targets2),axis=0)
Gens_not6_targets2=np.concatenate((Gen2_targets2,Gen3_targets2,Gen4_targets2,Gen5_targets2,Gen1_targets2,Gen7_targets2),axis=0)
Gens_not7_targets2=np.concatenate((Gen2_targets2,Gen3_targets2,Gen4_targets2,Gen5_targets2,Gen6_targets2,Gen1_targets2),axis=0)
# # Tuning XGB Parameters
#
# In the previous kernel, I'd only done minor tuning of the XGB parameters when trying to fit to the full Pokedex. I'd then just assumed this was the best choice for all other situations, which might not actually be true.
#
# In this kernel, I optimized a range of hyperparameters for both the Type 1 and Type 2 models, to obtain the best Test accuracy. This included tuning:
#
# * max depth and min child weight
# * subsample and col sample by tree
# * gamma
# * reg alpha
# * reg lambda
# * learning rate and n estimators
#
# In both cases, I was able to improve the accuracy by about 5% compared to the default values.
#
# For both models, I also explored the effect of adding weightings, but only found improvements for the Type 2 model, which has a major imbalance between None and all other types.
#
# For type 1, I found that the optimal parameters were:
#
# max depth = 3, n estimators = 158, learning rate = 0.1, gamma = 0, min child weight = 1, subsample = 0.6, col sample by tree = 0.2, alpha =0 and lambda = 0.9.
#
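# A sketch of how such a parameter search could be run. This is not the code
# that was used for the tuning in this notebook; GridSearchCV is assumed to be
# available from sklearn.model_selection, and the small grid below is
# illustrative only.
from sklearn.model_selection import GridSearchCV
tuning_grid = GridSearchCV(
    estimator=xgb.XGBClassifier(learning_rate=0.1, n_estimators=300, random_state=1),
    param_grid={"max_depth": [3, 4, 5], "min_child_weight": [1, 3, 5]},
    scoring="accuracy",
    cv=3,
)
tuning_grid.fit(Gens_not1_features, Gens_not1_targets)
print(tuning_grid.best_params_)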
# In[6]:
params = {
    'max_depth': 3, 'learning_rate': 0.1, 'n_estimators': 300,
    'silent': True, 'booster': 'gbtree', 'n_jobs': 1, 'nthread': 4,
    'gamma': 0, 'min_child_weight': 1, 'max_delta_step': 0,
    'subsample': 0.6, 'colsample_bytree': 0.2, 'colsample_bylevel': 1,
    'reg_alpha': 0, 'reg_lambda': 0.9, 'scale_pos_weight': 1,
    'base_score': 0.5, 'random_state': 1, 'missing': None,
}
# In[ ]:
#Test adding weights wrt water
#weights = np.zeros(len(Gens_not1_targets))
#for i in range(len(Gens_not1_targets)):
# weights[i]=Counter(Gens_not1_targets)['water']/Counter(Gens_not1_targets)[Gens_not1_targets[i]]
#weights
# In[7]:
#Generation 1 model
model_xgb=xgb.XGBClassifier(**params)
eval_set = [(Gens_not1_features, Gens_not1_targets),(Gen1_features, Gen1_targets)]
model_xgb.fit(Gens_not1_features, Gens_not1_targets,eval_set=eval_set,eval_metric="merror",verbose=False)
training_eval=model_xgb.evals_result()
min_error=min(training_eval['validation_1']['merror'])
print("The minimum error is:")
print(min_error)
training_step=training_eval['validation_1']['merror'].index(min_error)
print("This occurs at step:")
print(training_step)
xgb.plot_importance(model_xgb,max_num_features=20)
# In[8]:
#Final model
params['n_estimators']=158
model_xgb=xgb.XGBClassifier(**params)
model_xgb.fit(Gens_not1_features, Gens_not1_targets)
Gen1_T1_pred = model_xgb.predict(Gen1_features)
# evaluate predictions
test_accuracy = accuracy_score(Gen1_targets, Gen1_T1_pred)
print("Test Accuracy: %.2f%%" % (test_accuracy * 100.0))
xgb.plot_importance(model_xgb,max_num_features=20)
# Output a plot of the confusion matrix.
labels =list(set(Gen1_targets))
cm = metrics.confusion_matrix(Gen1_targets, Gen1_T1_pred,labels)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
sns.set(font_scale=4)
plt.figure(figsize=(20,20))
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
plt.title("Type 1 Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
print()
sns.set(font_scale=0.8)
# After hyperparameter tuning, I was able to get a 72.19% accuracy for Type 1, which just beats my models from the previous attempt.
#
# All types have some correct predictions, except for Ice, which is always confused for either Water or Psychic.
#
# By contrast, Bug, Water and Grass each have 100% accuracy, and Normal performs pretty well too.
#
# Most of the incorrect predictions appear to come from incorrect assignment of Water, Normal, Electric or Psychic type, meaning the model over-predicted all four of these types.
#
# Since type ordering is somewhat arbitrary, there is the possibility that some of these are correct predictions, but for Type 2, rather than type 1.
# In[9]:
print("Some predictions may match the sub-type, rather than the main type")
mismatch_accuracy = accuracy_score(Gen1_targets2, Gen1_T1_pred)
print("Mismatch Accuracy: %.2f%%" % (mismatch_accuracy * 100.0))
print("The Pokemon whose predicted types match their sub-type are:")
for i in range(0,len(Gen1_targets)):
if Gen1_T1_pred[i] == Gen1_targets2[i]:
print (pokemon_df["name"][i])
# As it turns out, there are 7 Pokemon which fall into this category.
#
# However, this still leaves about a quarter of the Pokemon with incorrect types.
#
# One possible way to address this is to look closer at the incorrect predictions to see where they went wrong, and come up with ideas for how to fix them. For now, this is a task left to the future.
# In[10]:
print("Pokemon with incorrect types are as follows:")
for i in range(0,len(Gen1_targets)):
if Gen1_T1_pred[i] != Gen1_targets[i]:
print (pokemon_df["name"][i],Gen1_T1_pred[i])
# In[ ]:
#selection = SelectFromModel(model_xgb, threshold=1e-15,prefit=True)
#feature_idx = selection.get_support()
#feature_name = Gens_not1_features.columns[feature_idx]
#print(feature_name)
#print(feature_name.shape)
# In[11]:
weights = np.zeros(len(Gens_not1_targets2))
for i in range(len(Gens_not1_targets2)):
weights[i]=Counter(Gens_not1_targets2)['none']/Counter(Gens_not1_targets2)[Gens_not1_targets2[i]]
# For type 2, I found that the optimal parameters were:
#
# max depth = 4, n estimators = 242, learning rate = 0.1, gamma = 0.1, min child weight = 3, subsample = 1, col sample by tree = 0.3, alpha =0 and lambda = 1.
#
# In[12]:
#With weights
#Max depth 4
#child weight 3
#gamma 0.1
#colsample 0.3
#Without weights: child weight=4, lambda=4
params2 = {
    'max_depth': 4, 'learning_rate': 0.1, 'n_estimators': 300,
    'silent': True, 'booster': 'gbtree', 'n_jobs': 1, 'nthread': 4,
    'gamma': 0.1, 'min_child_weight': 3, 'max_delta_step': 0,
    'subsample': 1, 'colsample_bytree': 0.3, 'colsample_bylevel': 1,
    'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1,
    'base_score': 0.5, 'random_state': 1, 'missing': None,
}
# In[13]:
#Type 2 classification
model_xgb2=xgb.XGBClassifier(**params2)
eval_set = [(Gens_not1_features, Gens_not1_targets2),(Gen1_features, Gen1_targets2)]
model_xgb2.fit(Gens_not1_features, Gens_not1_targets2,sample_weight=weights,eval_set=eval_set,eval_metric="merror",verbose=False)
training_eval=model_xgb2.evals_result()
min_error=min(training_eval['validation_1']['merror'])
print("The minimum error is:")
print(min_error)
training_step=training_eval['validation_1']['merror'].index(min_error)
print("This occurs at step:")
print(training_step)
xgb.plot_importance(model_xgb2,max_num_features=20)
# In[14]:
#Type 2 final version
params2['n_estimators']=242
model_xgb2=xgb.XGBClassifier(**params2)
model_xgb2.fit(Gens_not1_features, Gens_not1_targets2,weights)
Gen1_T2_pred = model_xgb2.predict(Gen1_features)
# evaluate predictions
test_accuracy = accuracy_score(Gen1_targets2, Gen1_T2_pred)
print("Test Accuracy: %.2f%%" % (test_accuracy * 100.0))
xgb.plot_importance(model_xgb2,max_num_features=20)
# Output a plot of the confusion matrix.
labels =list(set(Gen1_targets2))
cm = metrics.confusion_matrix(Gen1_targets2, Gen1_T2_pred,labels)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
sns.set(font_scale=4)
plt.figure(figsize=(20,20))
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
plt.title("Type 2 Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
print()
sns.set(font_scale=0.8)
# After hyperparameter tuning, I was able to get a 67.55% accuracy for Type 2, which marginally beats my older model.
#
# As always for Type 2, most types were incorrectly predicted as None, with a few other misclassifications, such as Rock, Ground or Poison.
#
# Flying and Rock stand out as particularly good predictions, with most of both correctly identified. Steel, Psychic and Ground all have a reasonable number of correct predictions.
#
# Since type ordering is somewhat arbitrary, there is the possibility that some of these are correct predictions, but for Type 1, rather than type 2.
# In[15]:
print("Some predictions may match the main type, rather than the sub-type")
mismatch_accuracy_T2 = accuracy_score(Gen1_targets, Gen1_T2_pred)
print("Mismatch Accuracy: %.2f%%" % (mismatch_accuracy_T2 * 100.0))
print("The Pokemon whose predicted types match their main type are:")
for i in range(0,len(Gen1_targets2)):
if Gen1_T2_pred[i] == Gen1_targets[i]:
print (pokemon_df["name"][i])
# In this case, 6 Pokemon had the correct type predictions, but in the wrong order. The 4 fossil Pokemon, between Omanyte and Kabutops, have appeared in both sets of mis-ordered predictions. This means that both types were correctly predicted, just in the wrong order.
#
# As before, it might be instructive to look at the incorrect predictions to try and work out where they went wrong.
# In[16]:
print("Pokemon with incorrect sub-types are as follows:")
for i in range(0,len(Gen1_targets2)):
if Gen1_T2_pred[i] != Gen1_targets2[i]:
print (pokemon_df["name"][i],Gen1_T2_pred[i])
# In the majority of cases, it is simply that None was selected instead of the correct type, suggesting it might be possible to add more information to the model and improve the predictions.
#
# In other cases, a Pokemon was predicted a type, but it was wrong. A few of these are interesting, given the nature of the incorrect prediction.
#
# For example, Charizard is predicted to have Dragon, rather than Flying sub-type. This has been a wish of fans since the beginning, and actually came true for one of the Mega Evolutions.
#
# Beedrill and Venomoth are both predicted to be Flying sub-type, which is understandable, given that they both have wings, however they are both actually poison types.
#
# Some of the other mistakes, like Mewtwo being sub-type Ice, or Gyarados being Ground, are just odd.
# I improved both of my models by incorporating the ordering mismatches. This led to slight improvements for both models, although by less than the number of mis-ordered Types. This is because the other model may have predicted the same type already, meaning that updating the value made no difference.
# In[17]:
Gen1_T1_pred_v2=Gen1_T1_pred.copy()
Gen1_T2_pred_v2=Gen1_T2_pred.copy()
for i in range(0,len(Gen1_targets)):
if Gen1_T1_pred[i] == Gen1_targets2[i]:
Gen1_T2_pred_v2[i]=Gen1_T1_pred[i]
for i in range(0,len(Gen1_targets)):
if Gen1_T2_pred[i] == Gen1_targets[i]:
Gen1_T1_pred_v2[i]=Gen1_T2_pred[i]
Type1_accuracy = accuracy_score(Gen1_targets, Gen1_T1_pred_v2)
print("New Type 1 Accuracy: %.2f%%" % (Type1_accuracy * 100.0))
Type2_accuracy = accuracy_score(Gen1_targets2, Gen1_T2_pred_v2)
print("New Type 2 Accuracy: %.2f%%" % (Type2_accuracy * 100.0))
# By combining the two models in this way, I was able to raise the accuracy of both to over 70%, and reach new records for both.
#
# Something interesting to note is that when I re-used my Type 1 parameters for Type 2, the overall accuracy was worse, but the mismatch percentage was higher. This meant I could get a 75% accuracy on Type 1 when both models were combined, but with a lower Type 2 accuracy.
#
# I'd still like to do feature engineering in some way, because I'm sure it must be possible to improve the accuracy further.
#
#
# In[26]:
XGB_predictions_df = pd.DataFrame()
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A module to produce a model validation plot
It uses one external program:
map_align for contact map alignment
*** This program needs to be installed separately from https://github.com/sokrypton/map_align***
"""
from __future__ import division
from __future__ import print_function
import os
from Bio.PDB.DSSP import DSSP
import numpy as np
import pandas as pd
import tempfile
from conkit.applications import MapAlignCommandline
from conkit.core.distance import Distance
import conkit.io
from conkit.misc import load_validation_model, SELECTED_VALIDATION_FEATURES, ALL_VALIDATION_FEATURES
from conkit.plot.figure import Figure
import conkit.plot.tools as tools
LINEKWARGS = dict(linestyle="--", linewidth=1.0, alpha=0.5, color=tools.ColorDefinitions.MISMATCH, zorder=1)
MARKERKWARGS = dict(marker='|', linestyle='None')
_MARKERKWARGS = dict(marker='s', linestyle='None')
class ModelValidationFigure(Figure):
"""A Figure object specifc for a model validation. This figure represents the proabbility that each given residue
in the model is involved in a model error. This is donw by feeding a trained classfier the differences observed
between the predicted distogram and the observed inter-residue contacts and distances at the PDB model.
Attributes
----------
model: :obj:`~conkit.core.distogram.Distogram`
The PDB model that will be validated
prediction: :obj:`~conkit.core.distogram.Distogram`
The distogram with the residue distance predictions
sequence: :obj:`~conkit.core.sequence.Sequence`
The sequence of the structure
dssp: :obj:`Bio.PDB.DSSP.DSSP`
The DSSP output for the PDB model that will be validated
map_align_exe: str
The path to map_align executable [default: None]
dist_bins: list, tuple
A list of tuples with the boundaries of the distance bins to use in the calculation [default: CASP2 bins]
l_factor: float
The L/N factor used to filter the contacts before finding the False Negatives [default: 0.5]
absent_residues: set
The residues not observed in the model that will be validated (only if in PDB format)
Examples
--------
>>> from Bio.PDB import PDBParser
>>> from Bio.PDB.DSSP import DSSP
>>> p = PDBParser()
>>> structure = p.get_structure('TOXD', 'toxd/toxd.pdb')[0]
>>> dssp = DSSP(structure, 'toxd/toxd.pdb', dssp='mkdssp', acc_array='Wilke')
>>> import conkit
>>> sequence = conkit.io.read('toxd/toxd.fasta', 'fasta').top
>>> model = conkit.io.read('toxd/toxd.pdb', 'pdb').top_map
>>> prediction = conkit.io.read('toxd/toxd.npz', 'rosettanpz').top_map
>>> conkit.plot.ModelValidationFigure(model, prediction, sequence, dssp)
"""
def __init__(self, model, prediction, sequence, dssp, map_align_exe=None, dist_bins=None, l_factor=0.5, **kwargs):
"""A new model validation plot
Parameters
----------
model: :obj:`~conkit.core.distogram.Distogram`
The PDB model that will be validated
prediction: :obj:`~conkit.core.distogram.Distogram`
The distogram with the residue distance predictions
sequence: :obj:`~conkit.core.sequence.Sequence`
The sequence of the structure
dssp: :obj:`Bio.PDB.DSSP.DSSP`
The DSSP output for the PDB model that will be validated
map_align_exe: str
The path to map_align executable [default: None]
dist_bins: list, tuple
A list of tuples with the boundaries of the distance bins to use in the calculation [default: CASP2 bins]
l_factor: float
The L/N factor used to filter the contacts before finding the False Negatives [default: 0.5]
**kwargs
General :obj:`~conkit.plot.figure.Figure` keyword arguments
"""
super(ModelValidationFigure, self).__init__(**kwargs)
self._model = None
self._prediction = None
self._sequence = None
self._distance_bins = None
self.data = None
self.alignment = {}
self.sorted_scores = None
self.smooth_scores = None
if len(sequence) < 5:
raise ValueError('Cannot validate a model with less than 5 residues')
self.map_align_exe = map_align_exe
self.l_factor = l_factor
self.dist_bins = dist_bins
self.model = model
self.prediction = prediction
self.sequence = sequence
self.classifier, self.scaler = load_validation_model()
self.absent_residues = self._get_absent_residues()
self.dssp = self._parse_dssp(dssp)
self.draw()
def __repr__(self):
return self.__class__.__name__
@property
def dist_bins(self):
return self._dist_bins
@dist_bins.setter
def dist_bins(self, dist_bins):
if dist_bins is None:
self._dist_bins = ((0, 4), (4, 6), (6, 8), (8, 10), (10, 12), (12, 14),
(14, 16), (16, 18), (18, 20), (20, np.inf))
else:
Distance._assert_valid_bins(dist_bins)
self._dist_bins = dist_bins
@property
def sequence(self):
return self._sequence
@sequence.setter
def sequence(self, sequence):
if sequence and tools._isinstance(sequence, "Sequence"):
self._sequence = sequence
else:
raise TypeError("Invalid hierarchy type for sequence: %s" % sequence.__class__.__name__)
@property
def prediction(self):
return self._prediction
@prediction.setter
def prediction(self, prediction):
if prediction and tools._isinstance(prediction, "Distogram"):
self._prediction = prediction
else:
raise TypeError("Invalid hierarchy type for prediction: %s" % prediction.__class__.__name__)
@property
def model(self):
return self._model
@model.setter
def model(self, model):
if model and tools._isinstance(model, "Distogram"):
self._model = model
else:
raise TypeError("Invalid hierarchy type for model: %s" % model.__class__.__name__)
def _get_absent_residues(self):
"""Get a set of residues absent from the :attr:`~conkit.plot.ModelValidationFigure.model` and
:attr:`~conkit.plot.ModelValidationFigure.prediction`. Only distograms originating from PDB files
are considered."""
absent_residues = []
if self.model.original_file_format == "PDB":
absent_residues += self.model.get_absent_residues(len(self.sequence))
if self.prediction.original_file_format == "PDB":
absent_residues += self.prediction.get_absent_residues(len(self.sequence))
return set(absent_residues)
def _prepare_distogram(self, distogram):
"""General operations to prepare a :obj:`~conkit.core.distogram.Distogram` instance before plotting."""
distogram.get_unique_distances(inplace=True)
distogram.sequence = self.sequence
distogram.set_sequence_register()
if distogram.original_file_format != "PDB":
distogram.reshape_bins(self.dist_bins)
return distogram
def _prepare_contactmap(self, distogram):
"""General operations to prepare a :obj:`~conkit.core.contactmap.ContactMap` instance before plotting."""
contactmap = distogram.as_contactmap()
contactmap.sequence = self.sequence
contactmap.set_sequence_register()
contactmap.remove_neighbors(inplace=True)
if distogram.original_file_format != "PDB":
contactmap.sort("raw_score", reverse=True, inplace=True)
contactmap.slice_map(seq_len=len(self.sequence), l_factor=self.l_factor, inplace=True)
return contactmap
def _parse_dssp(self, dssp):
"""Parse :obj:`Bio.PDB.DSSP.DSSP` into a :obj:`pandas.DataFrame` with secondary structure information
about the model"""
if not tools._isinstance(dssp, DSSP):
raise TypeError("Invalid hierarchy type for dssp: %s" % dssp.__class__.__name__)
_dssp_list = []
for residue in sorted(dssp.keys(), key=lambda x: x[1][1]):
resnum = residue[1][1]
if resnum in self.absent_residues:
_dssp_list.append((resnum, np.nan, np.nan, np.nan, np.nan))
continue
acc = dssp[residue][3]
if dssp[residue][2] in ('-', 'T', 'S'):
ss2 = (1, 0, 0)
elif dssp[residue][2] in ('H', 'G', 'I'):
ss2 = (0, 1, 0)
else:
ss2 = (0, 0, 1)
_dssp_list.append((resnum, *ss2, acc))
dssp = pd.DataFrame(_dssp_list)
dssp.columns = ['RESNUM', 'COIL', 'HELIX', 'SHEET', 'ACC']
return dssp
def _get_cmap_alignment(self):
"""Obtain a contact map alignment between :attr:`~conkit.plot.ModelValidationFigure.model` and
:attr:`~conkit.plot.ModelValidationFigure.prediction` and get the misaligned residues"""
with tempfile.TemporaryDirectory() as tmpdirname:
contact_map_a = os.path.join(tmpdirname, 'contact_map_a.mapalign')
contact_map_b = os.path.join(tmpdirname, 'contact_map_b.mapalign')
conkit.io.write(contact_map_a, 'mapalign', self.prediction)
conkit.io.write(contact_map_b, 'mapalign', self.model)
map_align_cline = MapAlignCommandline(
cmd=self.map_align_exe,
contact_map_a=contact_map_a,
contact_map_b=contact_map_b)
stdout, stderr = map_align_cline()
self.alignment = tools.parse_map_align_stdout(stdout)
def _parse_data(self, predicted_dict, *metrics):
"""Create a :obj:`pandas.DataFrame` with the features of the residues in the model"""
_features = []
for residue_features in zip(sorted(predicted_dict.keys()), *metrics):
_features.append((*residue_features,))
        self.data = pd.DataFrame(_features)
# This file is called separately from the rest of the program. This file takes the original data and creates cleaner csvs for app.py to use
import gsw
import numpy as np
import pandas as pd
# all of the parameters from the full data: 'yyyy-mm-ddThh:mm:ss.sss', 'Longitude [degrees_east]', 'Latitude [degrees_north]',
# 'PRESSURE [dbar]', 'DEPTH [m]', 'CTDTMP [deg C]', 'CTDSAL', 'SALINITY_D_CONC_BOTTLE', 'SALINITY_D_CONC_PUMP',
# 'SALINITY_D_CONC_FISH', 'SALINITY_D_CONC_UWAY', 'NITRATE_D_CONC_BOTTLE [umol/kg]', 'NITRATE_D_CONC_PUMP [umol/kg]',
# 'NITRATE_D_CONC_FISH [umol/kg]', 'NITRATE_D_CONC_UWAY [umol/kg]', 'NITRATE_LL_D_CONC_BOTTLE [umol/kg]',
# 'NITRATE_LL_D_CONC_FISH [umol/kg]', 'NO2+NO3_D_CONC_BOTTLE [umol/kg]', 'NO2+NO3_D_CONC_FISH [umol/kg]',
# 'Fe_D_CONC_BOTTLE [nmol/kg]', 'Fe_D_CONC_FISH [nmol/kg]', 'Fe_II_D_CONC_BOTTLE [nmol/kg]', 'Fe_II_D_CONC_FISH [nmol/kg]',
# 'Fe_S_CONC_BOTTLE [nmol/kg]', 'Fe_S_CONC_FISH [nmol/kg]'
# averages rows that share the same station, location, and depth.
def average_data(cruise_data):
# from https://stackoverflow.com/questions/48830324/pandas-average-columns-with-same-value-in-other-columns
cruise_data = cruise_data.groupby(
["Latitude", "Longitude", "Station", "Depth"], as_index=False
).mean()
return cruise_data
# removes stations whose iron data is entirely missing.
def remove_empty_data(cruise_data):
grouped_data = cruise_data.groupby(["Latitude", "Longitude", "Station"])
for name, group in grouped_data:
if group["Iron"].isna().values.all():
cruise_data = cruise_data.drop(grouped_data.get_group(name).index)
return cruise_data
# gets the average nitrate values that are used to get ratio data.
def get_nitrate(cruise_data, index, row):
current_depth = row["Depth"]
min = None
max = None
if row["Depth"] <= 100: # for under 100m, we average nitrates between +/- 5m
min, max = current_depth - 5, current_depth + 5
elif row["Depth"] > 100: # for over 100m, we average nitrates between +/- 10m
min, max = current_depth - 10, current_depth + 10
lon = row["Longitude"]
lat = row["Latitude"]
avg_nitrate = cruise_data["Nitrate"][
(
(cruise_data.Depth <= max)
& (cruise_data.Depth >= min)
& (cruise_data.Longitude == lon)
& (cruise_data.Latitude == lat)
)
].mean()
return avg_nitrate
# create the ratio data
def add_ratio_data(cruise_data):
averaged_nitrate = []
# get averaged nitrate data at each point
for index, row in cruise_data.iterrows():
nitrate = get_nitrate(cruise_data, index, row)
averaged_nitrate.append(nitrate)
ratio = (
np.array(averaged_nitrate) / cruise_data["Iron"]
) # calculate ratio by dividing averaged nitrate by iron
cruise_data[
"Averaged Nitrate"
] = averaged_nitrate # add a column of averaged nitrate
cruise_data["Ratio"] = ratio # add the ratio column
# add the column of density data
def add_density_data(cruise_data):
# Uses the gsw library: http://www.teos-10.org/pubs/gsw/html/gsw_sigma0.html
practical_salinity = cruise_data["Salinity"]
pressure = cruise_data["Pressure"]
longitude = cruise_data["Longitude"]
latitude = cruise_data["Latitude"]
absolute_salinity = gsw.SA_from_SP(
practical_salinity, pressure, longitude, latitude
)
temperature = cruise_data["Temperature"]
sigma0 = gsw.sigma0(absolute_salinity, temperature)
cruise_data["Density"] = sigma0
# read in original data
GA03_data = pd.read_csv("./data/GA03w.csv")
GIPY05_data = pd.read_csv("./data/GIPY05e.csv")
GP02_data = pd.read_csv("./data/GP02w.csv")
GIPY04_data = pd.read_csv("./data/GIPY04.csv")
# the headers for our clean data
headers = [
"Station",
"Date",
"Latitude",
"Longitude",
"Depth",
"Temperature",
"Salinity",
"Nitrate",
"Iron",
"Pressure",
]
# make GA03 dataframe and csv
data = [
GA03_data["Station"],
GA03_data["yyyy-mm-ddThh:mm:ss.sss"],
GA03_data["Latitude [degrees_north]"],
GA03_data["Longitude [degrees_east]"],
GA03_data["DEPTH [m]"],
GA03_data["CTDTMP [deg C]"],
GA03_data["CTDSAL"],
GA03_data["NITRATE_D_CONC_BOTTLE [umol/kg]"],
GA03_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
GA03_data["PRESSURE [dbar]"],
]
GA03 = pd.concat(data, axis=1, keys=headers)
# -*- coding: utf-8 -*-
import inspect
import io
import logging
import os
import time
import warnings
from collections import Iterable, Iterator, defaultdict, namedtuple
from copy import copy
from functools import wraps
from typing import Any, Dict, Optional, cast
import numpy as np
import pandas as pd
import pyarrow as pa
from kartothek.core import naming
from kartothek.core.common_metadata import (
make_meta,
normalize_column_order,
read_schema_metadata,
validate_compatible,
validate_shared_columns,
)
from kartothek.core.index import ExplicitSecondaryIndex, IndexBase
from kartothek.core.index import merge_indices as merge_indices_algo
from kartothek.core.naming import get_partition_file_prefix
from kartothek.core.partition import Partition
from kartothek.core.urlencode import decode_key, quote_indices
from kartothek.core.utils import ensure_string_type, verify_metadata_version
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.docs import default_docs
from kartothek.io_components.utils import _instantiate_store, combine_metadata
from kartothek.serialization import (
DataFrameSerializer,
default_serializer,
filter_df_from_predicates,
)
LOGGER = logging.getLogger(__name__)
SINGLE_TABLE = "table"
_Literal = namedtuple("_Literal", ["column", "op", "value"])
_SplitPredicate = namedtuple("_SplitPredicate", ["key_part", "content_part"])
def _predicates_to_named(predicates):
if predicates is None:
return None
return [[_Literal(*x) for x in conjunction] for conjunction in predicates]
def _combine_predicates(predicates, logical_conjunction):
if not logical_conjunction:
return predicates
if predicates is None:
return [logical_conjunction]
combined_predicates = []
for conjunction in predicates:
new_conjunction = conjunction[:]
for literal in logical_conjunction:
new_conjunction.append(literal)
combined_predicates.append(new_conjunction)
return combined_predicates
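# Predicates are given in disjunctive normal form: a list of OR-ed conjunctions,
# each of which is a list of (column, op, value) literals. An illustrative
# example (column names made up):
#     [[("x", "==", 1), ("y", ">", 2.5)], [("x", "==", 3)]]
# means (x == 1 AND y > 2.5) OR (x == 3).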
def _initialize_store_for_metapartition(method, method_args, method_kwargs):
for store_variable in ["store", "storage"]:
if store_variable in method_kwargs:
method_kwargs[store_variable] = _instantiate_store(
method_kwargs[store_variable]
)
else:
method = cast(object, method)
args = inspect.getfullargspec(method).args
if store_variable in args:
ix = args.index(store_variable)
# reduce index since the argspec and method_args start counting differently due to self
ix -= 1
instantiated_store = _instantiate_store(method_args[ix])
new_args = []
for ix_method, arg in enumerate(method_args):
if ix_method != ix:
new_args.append(arg)
else:
new_args.append(instantiated_store)
method_args = tuple(new_args)
return method_args, method_kwargs
def _apply_to_list(method):
"""
Decorate a MetaPartition method to act upon the internal list of metapartitions
The methods must return a MetaPartition object!
"""
@wraps(method)
def _impl(self, *method_args, **method_kwargs):
if not isinstance(self, MetaPartition):
raise TypeError("Type unknown %s", type(self))
result = self.as_sentinel()
if len(self) == 0:
raise RuntimeError("Invalid MetaPartition. No sub-partitions to act upon.")
# Look whether there is a `store` in the arguments and instatiate it
# this way we avoid multiple HTTP pools
method_args, method_kwargs = _initialize_store_for_metapartition(
method, method_args, method_kwargs
)
if (len(self) == 1) and (self.label is None):
result = method(self, *method_args, **method_kwargs)
else:
for mp in self:
method_return = method(mp, *method_args, **method_kwargs)
if not isinstance(method_return, MetaPartition):
raise ValueError(
"Method {} did not return a MetaPartition "
"but {}".format(method.__name__, type(method_return))
)
if method_return.is_sentinel:
result = method_return
else:
for mp in method_return:
result = result.add_metapartition(mp)
if not isinstance(result, MetaPartition):
            raise ValueError(
                "Result for method {} is not a `MetaPartition` but {}".format(
                    method.__name__, type(method_return)
                )
            )
return result
return _impl
class MetaPartitionIterator(Iterator):
def __init__(self, metapartition):
self.metapartition = metapartition
self.position = 0
def __iter__(self):
return self
def __next__(self):
current = self.metapartition
if len(current) == 1:
if current.label is None:
raise StopIteration()
if self.position >= len(current.metapartitions):
raise StopIteration()
else:
mp_dict = current.metapartitions[self.position]
# These are global attributes, i.e. the nested metapartitions do not carry these and need
# to be added here
mp_dict["dataset_metadata"] = current.dataset_metadata
mp_dict["metadata_version"] = current.metadata_version
mp_dict["table_meta"] = current.table_meta
mp_dict["partition_keys"] = current.partition_keys
mp_dict["logical_conjunction"] = current.logical_conjunction
self.position += 1
return MetaPartition.from_dict(mp_dict)
next = __next__ # Python 2
class MetaPartition(Iterable):
"""
Wrapper for kartothek partition which includes additional information
about the parent dataset
"""
def __init__(
self,
label,
files=None,
metadata=None,
data=None,
dataset_metadata=None,
indices: Optional[Dict[Any, Any]] = None,
metadata_version=None,
table_meta=None,
partition_keys=None,
logical_conjunction=None,
):
"""
Initialize the :mod:`kartothek.io` base class MetaPartition.
The `MetaPartition` is used as a wrapper around the kartothek
        `Partition` and primarily deals with dataframe manipulations
        and input/output to the store.
The :class:`kartothek.io_components.metapartition` is immutable, i.e. all member
functions will return a new MetaPartition object where the new
attribute is changed
Parameters
----------
label : basestring
partition label
files : dict, optional
A dictionary with references to the files in store where the
            keys represent file labels and the values file prefixes.
metadata : dict, optional
The metadata of the partition
data : dict, optional
A dictionary including the materialized in-memory DataFrames
corresponding to the file references in `files`.
dataset_metadata : dict, optional
The metadata of the original dataset
indices : dict, optional
Kartothek index dictionary,
metadata_version : int, optional
table_meta: Dict[str, SchemaWrapper]
The dataset table schemas
partition_keys: List[str]
The dataset partition keys
logical_conjunction: List[Tuple[object, str, object]]
A logical conjunction to assign to the MetaPartition. By assigning
this, the MetaPartition will only be able to load data respecting
this conjunction.
"""
if metadata_version is None:
self.metadata_version = naming.DEFAULT_METADATA_VERSION
else:
self.metadata_version = metadata_version
verify_metadata_version(self.metadata_version)
self.table_meta = table_meta if table_meta else {}
if isinstance(data, dict) and (len(self.table_meta) == 0):
for table, df in data.items():
if df is not None:
self.table_meta[table] = make_meta(
df,
origin="{}/{}".format(table, label),
partition_keys=partition_keys,
)
indices = indices or {}
for column, index_dct in indices.items():
if isinstance(index_dct, dict):
indices[column] = ExplicitSecondaryIndex(
column=column, index_dct=index_dct
)
self.logical_conjunction = logical_conjunction
self.metapartitions = [
{
"label": label,
"data": data or {},
"files": files or {},
"indices": indices,
"logical_conjunction": logical_conjunction,
}
]
self.dataset_metadata = dataset_metadata or {}
self.partition_keys = partition_keys or []
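    # Construction sketch (illustrative, not part of the original class): wrap a single
    # in-memory table; ``table_meta`` is inferred from the DataFrame via ``make_meta``
    # when it is not passed explicitly.
    #
    #     mp = MetaPartition(
    #         label="part_1",
    #         data={"core": pd.DataFrame({"x": [1, 2]})},
    #         metadata_version=4,
    #     )
    #     mp.tables   # -> ["core"]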
def __repr__(self):
if len(self.metapartitions) > 1:
label = "NESTED ({})".format(len(self.metapartitions))
else:
label = self.label
return "<{_class} v{version} | {label} | tables {tables} >".format(
version=self.metadata_version,
_class=self.__class__.__name__,
label=label,
tables=sorted(set(self.table_meta.keys())),
)
def __len__(self):
return len(self.metapartitions)
def __iter__(self):
return MetaPartitionIterator(self)
def __getitem__(self, label):
for mp in self:
if mp.label == label:
return mp
raise KeyError("Metapartition doesn't contain partition `{}`".format(label))
@property
def data(self):
if len(self.metapartitions) > 1:
raise AttributeError(
"Accessing `data` attribute is not allowed while nested"
)
assert isinstance(self.metapartitions[0], dict), self.metapartitions
return self.metapartitions[0]["data"]
@property
def files(self):
if len(self.metapartitions) > 1:
raise AttributeError(
"Accessing `files` attribute is not allowed while nested"
)
return self.metapartitions[0]["files"]
@property
def is_sentinel(self):
return len(self.metapartitions) == 1 and self.label is None
@property
def label(self):
if len(self.metapartitions) > 1:
raise AttributeError(
"Accessing `label` attribute is not allowed while nested"
)
assert isinstance(self.metapartitions[0], dict), self.metapartitions[0]
return self.metapartitions[0]["label"]
@property
def indices(self):
if len(self.metapartitions) > 1:
raise AttributeError(
"Accessing `indices` attribute is not allowed while nested"
)
return self.metapartitions[0]["indices"]
@property
def tables(self):
return list(set(self.data.keys()).union(set(self.files.keys())))
@property
def partition(self):
return Partition(label=self.label, files=self.files)
def __eq__(self, other):
if not isinstance(other, MetaPartition):
return False
if self.metadata_version != other.metadata_version:
return False
for table, meta in self.table_meta.items():
if not meta.equals(other.table_meta.get(table, None)):
return False
if self.dataset_metadata != other.dataset_metadata:
return False
if len(self.metapartitions) != len(other.metapartitions):
return False
# In the case both MetaPartitions are nested, we need to ensure a match
# for all sub-partitions.
# Since the label is unique, this can be used as a distinguishing key to sort and compare
# the nested metapartitions.
if len(self.metapartitions) > 1:
for mp_self, mp_other in zip(
sorted(self.metapartitions, key=lambda x: x["label"]),
sorted(other.metapartitions, key=lambda x: x["label"]),
):
if mp_self == mp_other:
continue
# If a single metapartition does not match, the whole object is considered different
return False
return True
# This is unnested only
self_keys = set(self.data.keys())
other_keys = set(other.data.keys())
if not (self_keys == other_keys):
return False
if self.label != other.label:
return False
if self.files != other.files:
return False
for label, df in self.data.items():
if not (df.equals(other.data[label])):
return False
return True
@staticmethod
def from_partition(
partition,
data=None,
dataset_metadata=None,
indices=None,
metadata_version=None,
table_meta=None,
partition_keys=None,
logical_conjunction=None,
):
"""
Transform a kartothek :class:`~kartothek.core.partition.Partition` into a
:class:`~kartothek.io_components.metapartition.MetaPartition`.
Parameters
----------
partition : :class:`~kartothek.core.partition.Partition`
The kartothek partition to be wrapped
data : dict, optional
            A dictionary with materialised :class:`~pandas.DataFrame` objects
dataset_metadata : dict of basestring, optional
The metadata of the original dataset
indices : dict
The index dictionary of the dataset
table_meta: Union[None, Dict[String, pyarrow.Schema]]
Type metadata for each table, optional
metadata_version: int, optional
partition_keys: Union[None, List[String]]
A list of the primary partition keys
Returns
-------
:class:`~kartothek.io_components.metapartition.MetaPartition`
"""
return MetaPartition(
label=partition.label,
files=partition.files,
data=data,
dataset_metadata=dataset_metadata,
indices=indices,
metadata_version=metadata_version,
table_meta=table_meta,
partition_keys=partition_keys,
logical_conjunction=logical_conjunction,
)
def add_metapartition(
self, metapartition, metadata_merger=None, schema_validation=True
):
"""
Adds a metapartition to the internal list structure to enable batch processing.
The top level `dataset_metadata` dictionary is combined with the existing dict and
all other attributes are stored in the `metapartitions` list
Parameters
----------
metapartition: [MetaPartition]
The MetaPartition to be added.
metadata_merger: [callable]
A callable to perform the metadata merge. By default [kartothek.io_components.utils.combine_metadata] is used
schema_validation : [bool]
If True (default), ensure that the `table_meta` of both `MetaPartition` objects are the same
"""
if self.is_sentinel:
return metapartition
table_meta = metapartition.table_meta
existing_label = [mp_["label"] for mp_ in self.metapartitions]
if any(
[mp_["label"] in existing_label for mp_ in metapartition.metapartitions]
):
raise RuntimeError(
"Duplicate labels for nested metapartitions are not allowed!"
)
if schema_validation:
table_meta = {}
for table, meta in self.table_meta.items():
other = metapartition.table_meta.get(table, None)
# This ensures that only schema-compatible metapartitions can be nested
# The returned schema by validate_compatible is the reference schema with the most
# information, i.e. the fewest null columns
table_meta[table] = validate_compatible([meta, other])
metadata_merger = metadata_merger or combine_metadata
new_dataset_metadata = metadata_merger(
[self.dataset_metadata, metapartition.dataset_metadata]
)
new_object = MetaPartition(
label="NestedMetaPartition",
dataset_metadata=new_dataset_metadata,
metadata_version=metapartition.metadata_version,
table_meta=table_meta,
partition_keys=metapartition.partition_keys or None,
logical_conjunction=metapartition.logical_conjunction or None,
)
# Add metapartition information to the new object
new_metapartitions = self.metapartitions.copy()
new_metapartitions.extend(metapartition.metapartitions.copy())
new_object.metapartitions = new_metapartitions
return new_object
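    # Nesting sketch (illustrative): combining two single partitions yields a nested
    # MetaPartition of length 2; duplicate labels raise a RuntimeError, and with
    # ``schema_validation=True`` incompatible table schemas fail ``validate_compatible``.
    #
    #     nested = mp_a.add_metapartition(mp_b)
    #     len(nested)                       # -> 2
    #     sorted(mp.label for mp in nested)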
@staticmethod
def from_dict(dct):
"""
Create a :class:`~kartothek.io_components.metapartition.MetaPartition` from a dictionary.
Parameters
----------
dct : dict
Dictionary containing constructor arguments as keys
Returns
        -------
        :class:`~kartothek.io_components.metapartition.MetaPartition`
        """
return MetaPartition(
label=dct["label"],
files=dct.get("files", {}),
metadata=dct.get("metadata", {}),
data=dct.get("data", {}),
indices=dct.get("indices", {}),
metadata_version=dct.get("metadata_version", None),
dataset_metadata=dct.get("dataset_metadata", {}),
table_meta=dct.get("table_meta", {}),
partition_keys=dct.get("partition_keys", None),
logical_conjunction=dct.get("logical_conjunction", None),
)
def to_dict(self):
return {
"label": self.label,
"files": self.files or {},
"data": self.data or {},
"indices": self.indices,
"metadata_version": self.metadata_version,
"dataset_metadata": self.dataset_metadata,
"table_meta": self.table_meta,
"partition_keys": self.partition_keys,
"logical_conjunction": self.logical_conjunction,
}
@_apply_to_list
def remove_dataframes(self):
"""
Remove all dataframes from the metapartition in memory.
"""
return self.copy(data={})
def _split_predicates_in_index_and_content(self, predicates):
"""
Split a list of predicates in the parts that can be resolved by the
partition columns and the ones that are persisted in the data file.
"""
# Predicates are split in this function into the parts that apply to
# the partition key columns `key_part` and the parts that apply to the
# contents of the file `content_part`.
split_predicates = []
has_index_condition = False
for conjunction in predicates:
key_part = []
content_part = []
for literal in conjunction:
if literal.column in self.partition_keys:
has_index_condition = True
key_part.append(literal)
else:
content_part.append(literal)
split_predicates.append(_SplitPredicate(key_part, content_part))
return split_predicates, has_index_condition
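    # Splitting sketch (illustrative, predicates shown as plain tuples for brevity):
    # with ``partition_keys == ["date"]`` the conjunction
    # ``[("date", "==", "2020-01-01"), ("x", ">", 5)]`` is split into
    # ``key_part == [("date", "==", "2020-01-01")]`` (resolved from the partition key)
    # and ``content_part == [("x", ">", 5)]`` (pushed down to the data file), and
    # ``has_index_condition`` is returned as True.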
def _apply_partition_key_predicates(self, table, indices, split_predicates):
"""
Apply the predicates to the partition_key columns and return the remaining
predicates that should be pushed to the DataFrame serialiser.
"""
# Construct a single line DF with the partition columns
schema = self.table_meta[table]
index_df_dct = {}
for column, value in indices:
pa_dtype = schema[schema.get_field_index(column)].type
value = IndexBase.normalize_value(pa_dtype, value)
if pa.types.is_date(pa_dtype):
index_df_dct[column] = pd.Series(
pd.to_datetime([value], infer_datetime_format=True)
).dt.date
else:
dtype = pa_dtype.to_pandas_dtype()
index_df_dct[column] = pd.Series([value], dtype=dtype)
index_df = pd.DataFrame(index_df_dct)
filtered_predicates = []
for conjunction in split_predicates:
predicates = [conjunction.key_part]
if (
len(conjunction.key_part) == 0
or len(
filter_df_from_predicates(
index_df, predicates, strict_date_types=True
)
)
> 0
):
if len(conjunction.content_part) > 0:
filtered_predicates.append(conjunction.content_part)
else:
                    # A condition applies to the whole DataFrame, so we need to
# load all data.
return None
return filtered_predicates
@default_docs
@_apply_to_list
def load_dataframes(
self,
store,
tables=None,
columns=None,
predicate_pushdown_to_io=True,
categoricals=None,
dates_as_object=False,
predicates=None,
):
"""
Load the dataframes of the partitions from store into memory.
Parameters
----------
tables : list of string, optional
If a list is supplied, only the given tables of the partition are
loaded. If the given table does not exist it is ignored.
        Examples
        --------
        .. code::
            >>> part = MetaPartition(
            ...     label='part_label',
... files={
... 'core': 'core_key_in_store',
... 'helper': 'helper_key_in_store'
... }
... )
>>> part.data
{}
>>> part = part.load_dataframes(store, ['core'])
>>> part.data
{
'core': pd.DataFrame()
}
"""
if columns is None:
columns = {}
if categoricals is None:
categoricals = {}
LOGGER.debug("Loading internal dataframes of %s", self.label)
if len(self.files) == 0:
# This used to raise, but the specs do not require this, so simply do a no op
LOGGER.debug("Partition %s is empty and has not tables/files", self.label)
return self
new_data = copy(self.data)
predicates = _combine_predicates(predicates, self.logical_conjunction)
predicates = _predicates_to_named(predicates)
for table, key in self.files.items():
table_columns = columns.get(table, None)
categories = categoricals.get(table, None)
dataset_uuid, _, indices, file_name = decode_key(key)
if tables and table not in tables:
continue
# In case the columns only refer to the partition indices, we need to load at least a single column to
# determine the length of the required dataframe.
if table_columns is None or (
table_columns is not None
and self.partition_keys
and set(table_columns) == set(self.partition_keys)
):
table_columns_to_io = None
else:
table_columns_to_io = table_columns
filtered_predicates = predicates
self._load_table_meta(dataset_uuid=dataset_uuid, table=table, store=store)
# Filter predicates that would apply to this partition and remove the partition columns
if predicates:
# Check if there are predicates that match to the partition columns.
# For these we need to check if the partition columns already falsify
                # the condition.
#
# We separate these predicates into their index and their Parquet part.
split_predicates, has_index_condition = self._split_predicates_in_index_and_content(
predicates
)
filtered_predicates = []
if has_index_condition:
filtered_predicates = self._apply_partition_key_predicates(
table, indices, split_predicates
)
else:
filtered_predicates = [
pred.content_part for pred in split_predicates
]
# Remove partition_keys from table_columns_to_io
if (
self.partition_keys
and table_columns_to_io
and len(set(self.partition_keys) & set(table_columns_to_io)) > 0
):
keys_to_remove = set(self.partition_keys) & set(table_columns_to_io)
# This is done to not change the ordering of the list
table_columns_to_io = [
c for c in table_columns_to_io if c not in keys_to_remove
]
start = time.time()
df = DataFrameSerializer.restore_dataframe(
key=key,
store=store,
columns=table_columns_to_io,
categories=categories,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=filtered_predicates,
date_as_object=dates_as_object,
)
LOGGER.debug("Loaded dataframe %s in %s seconds.", key, time.time() - start)
            # Metadata version >= 4 parses the index columns and adds them back to the dataframe
df = self._reconstruct_index_columns(
df=df,
key_indices=indices,
table=table,
columns=table_columns,
categories=categories,
date_as_object=dates_as_object,
)
df.columns = df.columns.map(ensure_string_type)
if table_columns is not None:
# TODO: When the write-path ensures that all partitions have the same column set, this check can be
# moved before `DataFrameSerializer.restore_dataframe`. At the position of the current check we
# may want to double check the columns of the loaded DF and raise an exception indicating an
# inconsistent dataset state instead.
missing_cols = set(table_columns).difference(df.columns)
if missing_cols:
raise ValueError(
"Columns cannot be found in stored dataframe: {}".format(
", ".join(sorted(missing_cols))
)
)
df = df.loc[:, table_columns]
new_data[table] = df
return self.copy(data=new_data)
@_apply_to_list
def load_all_table_meta(self, store, dataset_uuid):
"""
        Loads all table metadata into memory and stores it under the `table_meta` attribute
"""
for table in self.files:
self._load_table_meta(dataset_uuid, table, store)
return self
def _load_table_meta(self, dataset_uuid, table, store):
if table not in self.table_meta:
_common_metadata = read_schema_metadata(
dataset_uuid=dataset_uuid, store=store, table=table
)
self.table_meta[table] = _common_metadata
return self
def _reconstruct_index_columns(
self, df, key_indices, table, columns, categories, date_as_object
):
if len(key_indices) == 0:
return df
index_cols = []
original_columns = list(df.columns)
pd_index = pd.RangeIndex(stop=len(df))
zeros = np.zeros(len(df), dtype=int)
schema = self.table_meta[table]
for primary_key, value in key_indices:
# If there are predicates, don't reconstruct the index if it wasn't requested
if columns is not None and primary_key not in columns:
continue
pa_dtype = schema.field_by_name(primary_key).type
dtype = pa_dtype.to_pandas_dtype()
convert_to_date = False
if date_as_object and pa_dtype in [pa.date32(), pa.date64()]:
convert_to_date = True
if isinstance(dtype, type):
value = dtype(value)
elif isinstance(dtype, np.dtype):
if dtype == np.dtype("datetime64[ns]"):
value = pd.Timestamp(value)
else:
value = dtype.type(value)
else:
raise RuntimeError(
"Unexepected object encountered: ({}, {})".format(
dtype, type(dtype)
)
)
if categories and primary_key in categories:
if convert_to_date:
cats = pd.Series(value).dt.date
else:
cats = [value]
                cat = pd.Categorical.from_codes(zeros, categories=cats)
import os
import pandas as pd
import numpy as np
from imblearn.over_sampling import SMOTE
def Imputer(data, kind = "mean"):
df = data.copy()
for feature in df.columns:
if df[feature].dtype == "float":
if kind == "mean":
df[feature] = df[feature].fillna(df[feature].mean())
elif kind == "median":
df[feature] = df[feature].fillna(df[feature].median())
elif kind == "mode":
df[feature] = df[feature].fillna(df[feature].mode()[0])
elif df[feature].dtype == "object":
df[feature] = df[feature].fillna(df[feature].mode()[0])
return df
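# Usage sketch (illustrative, not part of the original script): numeric NaNs are filled
# per column with the chosen statistic, object columns always with their mode.
#
#     raw = pd.DataFrame({"age": [21.0, None, 35.0], "city": ["A", None, "A"]})
#     Imputer(raw, kind="median")   # age NaN -> 28.0, city NaN -> "A"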
def cimputer(fname: str,
kind: str = "mean",
dateCol: str = None,
dataDir: str = "data") -> None:
if not os.path.isdir(dataDir):
os.mkdir(dataDir)
if dateCol != "":
df = pd.read_csv(fname, parse_dates=[dateCol])
else:
df = pd.read_csv(fname)
dfImp = Imputer(df, kind)
if fname.find(f"{dataDir}/") != -1:
dfImp.to_csv(f"./{fname[:-4]}_{kind}_imputed.csv", index=False)
else:
dfImp.to_csv(f"./{dataDir}/{fname[:-4]}_{kind}_imputed.csv", index=False)
def Resample(data, replace, n_samples):
indices = data.index
random_sampled_indices = np.random.choice(indices,
size=n_samples,
replace=replace)
return data.loc[random_sampled_indices]
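# Resample sketch (illustrative; frame and column names are hypothetical): oversampling
# draws with replacement up to the majority-class size, undersampling draws without
# replacement down to the minority-class size.
#
#     minority = df[df["label"] == "pos"]
#     boosted = Resample(minority, replace=True, n_samples=1000)
#     len(boosted)   # -> 1000 rows, duplicates allowed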
def cresample(fname: str,
target: str,
neg_value: str,
pos_value: str,
kind: str,
dateCol: str = None,
random_state = 123,
dataDir: str = "data") -> None:
if not os.path.isdir(dataDir):
os.mkdir(dataDir)
if kind == "smote":
df = pd.read_csv(fname, header=None)
else:
if dateCol != "":
df = pd.read_csv(fname, parse_dates=[dateCol])
else:
df = pd.read_csv(fname)
negClass = df[df[target] == neg_value]
posClass = df[df[target] == pos_value]
df = df.drop("Date", axis=1)
if kind == "oversample":
posOverSampled = Resample(data=posClass, replace=True, n_samples=len(negClass))
overSampled = pd.concat([negClass, posOverSampled])
if fname.find(f"{dataDir}/") != -1:
overSampled.to_csv(f"./{fname[:-4]}_oversampled.csv", index=False)
else:
overSampled.to_csv(f"./{dataDir}/{fname[:-4]}_oversampled.csv", index=False)
if kind == "undersample":
negUnderSampled = Resample(data=negClass, replace=False, n_samples=len(posClass))
underSampled = pd.concat([negUnderSampled, posClass])
if fname.find(f"{dataDir}/") != -1:
underSampled.to_csv(f"./{fname[:-4]}_undersampled.csv", index=False)
else:
underSampled.to_csv(f"./{dataDir}/{fname[:-4]}_undersampled.csv", index=False)
if kind == "smote":
so = SMOTE()
features, targets = so.fit_resample(df.iloc[:, :-1], df.iloc[:,-1])
        smoteSampled = pd.concat([pd.DataFrame(features), pd.DataFrame(targets)], axis=1)  # assumed: recombine the resampled features and targets
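        # Assumed completion (not in the source): persist the SMOTE result the same way
        # as the other branches; the "_smote" file suffix is hypothetical.
        if fname.find(f"{dataDir}/") != -1:
            smoteSampled.to_csv(f"./{fname[:-4]}_smote.csv", index=False)
        else:
            smoteSampled.to_csv(f"./{dataDir}/{fname[:-4]}_smote.csv", index=False)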
# http://github.com/timestocome
# take a look at the differences in daily returns for recent bull and bear markets
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
# create target --- let's try Nasdaq value 1 day change
data['returns'] = (data['NASDAQ'] - data['NASDAQ'].shift(1)) / data['NASDAQ']
# remove nan row from target creation
data = data.dropna()
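# Worked example of the definition above: if the NASDAQ closes at 4000 one day and 4040
# the next, returns = (4040 - 4000) / 4040, about 0.0099. Note the denominator is the
# current close (not the previous one), so this differs slightly from a plain pct_change.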
'''
############################################################################
# plot returns on NASDAQ training data
#############################################################################
fig = plt.figure(figsize=(10,10))
plt.subplot(2,1,1)
plt.plot(data['returns'])
plt.title("Nasdaq daily returns")
# histogram of returns
plt.subplot(2,1,2)
plt.hist(data['returns'], bins=200)
plt.xlabel("Returns")
plt.ylabel("Probability")
plt.title("Histogram daily Nasdaq returns")
plt.grid(True)
# median
median_return = data['returns'].median()
l = plt.axvspan(median_return-0.0001, median_return+0.0001, color='red')
plt.show()
'''
#########################################################################
# split into bear and bull markets
##########################################################################
bull1_start = pd.to_datetime('01-01-1990') # beginning of this dataset
bull1_end = pd.to_datetime('07-16-1990')
iraq_bear_start = pd.to_datetime('07-17-1990')
iraq_bear_end = pd.to_datetime('10-11-1990')
bull2_start = pd.to_datetime('10-12-1990')
bull2_end = pd.to_datetime('01-13-2000')
dotcom_bear_start = pd.to_datetime('01-14-2000')
dotcom_bear_end = pd.to_datetime('10-09-2002')
bull3_start = pd.to_datetime('10-10-2002')
bull3_end = pd.to_datetime('10-08-2007')
housing_bear_start = pd.to_datetime('10-09-2007')
housing_bear_end = pd.to_datetime('03-09-2009')
bull4_start = pd.to_datetime('03-10-2009')
bull4_end = pd.to_datetime('12-31-2016') # end of this dataset
bull1 = data.loc[data.index <= bull1_end]
bear1 = data.loc[(data.index >= iraq_bear_start) & (data.index <= iraq_bear_end)]
bull2 = data.loc[(data.index >= bull2_start) & (data.index <= bull2_end)]
bear2 = data.loc[(data.index >= dotcom_bear_start) & (data.index <= dotcom_bear_end)]
bull3 = data.loc[(data.index >= bull3_start) & (data.index <= bull3_end)]
bear3 = data.loc[(data.index >= housing_bear_start) & (data.index <= housing_bear_end)]
bull4 = data.loc[data.index >= bull4_start]
####################################################################
# plot bear/bull markets - only the complete ones -
###################################################################
plt.figure(figsize=(16,16))
n_bins = 40
plt.suptitle("Returns for bear/bull markets Jan 1990-Dec 2016")
plt.subplot(7,2,1)
plt.title("Jan 90-Jul 90 Bull")
plt.plot(bull1['returns'], color='green')
plt.ylim(-0.15, .15)
plt.xlim(pd.to_datetime('01-01-1990'), pd.to_datetime('12-31-2016'))
plt.subplot(7,2,2)
plt.title("Jan 90-Jul 90 Bull")
plt.hist(bull1['returns'], range=[-0.15, 0.15], bins=n_bins, color='green', normed=True)
plt.ylim(0, 50)
median_return = bull1['returns'].median()
l = plt.axvspan(median_return-0.001, median_return+0.001, color='yellow')
plt.subplot(7,2,3)
plt.title("July90-Oct 90")
plt.plot(bear1['returns'], color='red')
plt.ylim(-0.15, .15)
plt.xlim(pd.to_datetime('01-01-1990'), pd.to_datetime('12-31-2016'))
plt.subplot(7,2,4)
plt.title("July 90-Oct 90")
plt.hist(bear1['returns'], range=[-0.15, 0.15], bins=n_bins, color='red', normed=True)
plt.ylim(0, 50)
median_return = bear1['returns'].median()
l = plt.axvspan(median_return-0.001, median_return+0.001, color='yellow')
plt.subplot(7,2,5)
plt.title("Oct 90-Jan 00")
plt.plot(bull2['returns'], color='green')
plt.ylim(-0.15, .15)
plt.xlim(pd.to_datetime('01-01-1990'), pd.to_datetime('12-31-2016'))
plt.subplot(7,2,6)
plt.title("Oct 90-Jan 00")
plt.hist(bull2['returns'], range=[-0.15, 0.15], bins=n_bins, color='green', normed=True)
plt.ylim(0, 50)
median_return = bull2['returns'].median()
l = plt.axvspan(median_return-0.001, median_return+0.001, color='yellow')
plt.subplot(7,2,7)
plt.title("Jan 00-Oct 02")
plt.plot(bear2['returns'], color='red')
plt.ylim(-0.15, .15)
plt.xlim(pd.to_datetime('01-01-1990'), pd.to_datetime('12-31-2016'))
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
        ts2 = concat([ts[0:4], ts[-4:], ts[4:-4]])
from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
import datetime
api_key = "<KEY>"
api_secret = "<KEY>"
client = Client(api_key, api_secret)
def get_pd_daily_histo(pair, since):
##### get historical data
historical = client.get_historical_klines(pair, Client.KLINE_INTERVAL_1DAY, since)
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open_Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close_Time', 'Quote_Asset_Volume',
'Number_of_Trades', 'TB_Base_Volume', 'TB_Quote_Volume', 'Ignore']
hist_df = hist_df.drop(['Quote_Asset_Volume', 'TB_Base_Volume', 'TB_Quote_Volume','Ignore'], axis=1)
    hist_df['Open_Time'] = pd.to_datetime(hist_df['Open_Time']/1000, unit='s')
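    # Assumed completion (the original snippet is cut off here): return the frame so the
    # helper is usable; Close_Time is left as a raw millisecond timestamp.
    return hist_df
# Usage sketch (pair and start date are hypothetical):
#
#     btc_daily = get_pd_daily_histo("BTCUSDT", "1 Jan 2021")
#     btc_daily[["Open_Time", "Close"]].tail()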
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not | Index(i.values) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 16:51:35 2021
GEOM4009- Final Report Code
Authors: Rajpal, Jacob, Joshua, Yussuf, Gillian
The following code aims to:
-Read in a dataframe containing at least an Object_ID, X positional data, Y positional data, and Timestamp data
- identify which column represent object ID
- which columns represent x,y positional data
- which column represents time data
-Simplify the number of points within the data using Ramer–Douglas–Peucker algorithm
-Determine the time span in the dataset
-Decide on the vertical exaggeration (time scale) of the data
-Convert 2D point data into a 3D dataset with time represented on Z axis – join points with lines
-Export to KML (or other format) for display
-determine the distance between two objects at all shared time-measurements
"""
import os
import pandas as pd
import geopandas as gpd
import datetime
import numpy as np
from shapely.geometry import Point
from shapely.geometry import LineString
import fiona
from time import time
from sklearn.neighbors import DistanceMetric
#Gillian's function
def set_directory (directory_path): #This function auto-sets the directory based on user specified pathway
"""
Parameters
----------
Directory_path : Pathway to the user directory
Used to set the directory and determine where to search for files
Returns
-------
    None. The working directory is changed in place as a side effect.
"""
direct= directory_path
try:
os.chdir(direct) #to set the directory specified by the user
os.listdir() #This will allow the user to check that the directory is in the location
    except OSError:
        print("The directory provided could not be found or accessed")  #warn the user that the path is invalid
#Gillian's function
def read_file (filename, ob_id, x, y,time_pos):
"""
Parameters
----------
filename : name of csv file
filename is used to locate file in the directory to be read in as dataframe
Returns
-------
a pandas dataframe
"""
try:
df = pd.read_csv(filename)# Here the main files will be read in as a pandas dataframe by the user
id_num= int(ob_id- 1) #subtracting 1 as the index starts at 0 in python
x_col= int(x-1) #subtracting 1 as the index starts at 0 in python
y_col= int(y-1) #subtracting 1 as the index starts at 0 in python
time= int(time_pos-1) #subtracting 1 as the index starts at 0 in python
new_df=df.rename(columns={df.columns[id_num]: "object_id", #renaming the columns by user specified index values
df.columns[x_col]: "x_pos_data",
df.columns[y_col]: "y_pos_data",
df.columns[time]: "timestamp"})
    except Exception:
        print("The file could not be read or the column indices are invalid")
        return None
    return new_df
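#A minimal usage sketch of the two helpers above (the directory path, filename and the
#1-based column positions are placeholders for illustration, not project values):
#    set_directory("C:/geom4009/data")
#    tracks = read_file("tracks.csv", ob_id=1, x=2, y=3, time_pos=4)
#    tracks then has the renamed columns object_id, x_pos_data, y_pos_data, timestamp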
#Jacob's function
def firstLast (df):
"""
#*# What does this function do?
Parameters
----------
df : TYPE
Calling on the converted geospatial dataframe from the previous function.
This geospatial dataframe has the renamed columns that will be used throughout the code
time_pos : TYPE
This is what will be called on later in the command line
when the user puts the column number containing timestamp values.
Returns
-------
None.
"""
## Here, the the variables start time and end time are assigned by
## picking the first, and last row of the time position column
#df['timestamp'] = pd.to_datetime(df['timestamp'], infer_datetime_format=True)
df['timestamp']=pd.to_datetime(df['timestamp'], format='%d/%m/%Y %H:%M')
time_df= df.sort_values(by="timestamp")
time= time_df["timestamp"]
startTime = time.iloc[0]
endTime = time.iloc[-1]
difference = endTime - startTime
return startTime, endTime, difference
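#Illustrative sketch (not part of the original code): the span returned above can be
#used for the vertical-exaggeration step, scaling each timestamp onto the Z axis:
#    start, end, span = firstLast(tracks)
#    z_scale = 1000.0 / span.total_seconds()   #exaggeration factor chosen for illustration
#    tracks['z'] = (tracks['timestamp'] - start).dt.total_seconds() * z_scale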
#Yussuf's function
def simple(df, lats , longs):
"""
Parameters
----------
df : dataframe
lats : Latitude ID
longs : Longitude ID
Returns
-------
result : Simplified df
"""
    #creates an empty df to concat all the simplified versions to
result = | pd.DataFrame() | pandas.DataFrame |
import unittest
from pangoro.preprocessing import PangoroDataFrame
import pandas as pd
def broken_function():
raise Exception('This is broken')
class TestDataFrame(unittest.TestCase):
def test_simple_dataframe(self):
df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
df2 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
self.assertEqual(True, df1.equals(df2))
def test_numerical_transformation_unchanged(self):
sample_data = pd.DataFrame({'pet': ['cat', 'dog', 'dog', pd.NA, 'cat', 'dog', 'cat', 'fish'],
'color': ['white', 'brown', 'black', 'gold', pd.NA,'black', 'white', 'silver'],
'weight': [50.5, 22.5, 4, pd.NA , 12, 39, 16, pd.NA]})
sample_data['weight'] = pd.to_numeric(sample_data['weight'])
#sample_data.dtypes
pdf = PangoroDataFrame(sample_data)
pdf.numerical_transformation()# without passing any parameters, this should not perform any change
self.assertEqual(True, pdf.equals(sample_data))
def test_numerical_transformation_na_treat_mode(self):
df1 = pd.DataFrame({'a': [1, pd.NA,1,1,1,1,1,8,9.0,pd.NA]})
df2 = | pd.DataFrame({'a': [1, 1,1,1,1,1,1,8,9,1.0]}) | pandas.DataFrame |
from __future__ import print_function, division, absolute_import
import os.path
import pandas as pd
import pytest
from sqlalchemy import create_engine
from sqlalchemy.engine import reflection
from sqlalchemy import MetaData, Table, String, Column
from sqlalchemy.sql import select, not_
from framequery import util
metadata = MetaData()
pg_namespace = Table(
'pg_namespace', metadata,
Column('nspname', String())
)
@pytest.mark.parametrize('qs', ['', '?model=dask'])
def test_create_engine_connect(qs):
engine = create_engine('framequery:///' + qs)
with engine.connect():
pass
def test_add_dataframe_query():
engine = create_engine('framequery:///')
engine.executor.update(foo=pd.DataFrame({'foo': [0, 1, 2]}))
assert engine.execute('select * from foo').fetchall() == [(0,), (1,), (2,)]
def test_duplicate_names():
engine = create_engine('framequery:///')
engine.executor.update(foo= | pd.DataFrame({'foo': [0, 1, 2]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
class TestPeriodIndexSeriesMethods(object):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with | tm.assert_raises_regex(TypeError, msg) | pandas.util.testing.assert_raises_regex |
import pandas as pd
from scripts.python.routines.manifest import get_manifest
import numpy as np
from tqdm import tqdm
import plotly.graph_objects as go
import statsmodels.formula.api as smf
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scripts.python.EWAS.routines.correction import correct_pvalues
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.scatter import add_scatter_trace
from scripts.python.routines.plot.layout import add_layout
from scripts.python.pheno.datasets.filter import filter_pheno, get_passed_fields
from scripts.python.pheno.datasets.features import get_column_name, get_default_statuses_ids, get_status_dict, get_default_statuses, get_sex_dict
from pathlib import Path
from scripts.python.routines.betas import betas_drop_na
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
datasets = ["GSE53740"]
is_rerun = True
num_cpgs_to_plot = 10
feats = {
"DNAmPhenoAgeAcc": "DNAmPhenoAgeAcc",
"DNAmGrimAgeAcc": "DNAmGrimAgeAcc"
}
for dataset in datasets:
print(dataset)
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
statuses = get_default_statuses(dataset)
status_col = get_column_name(dataset, 'Status').replace(' ', '_')
statuses_ids = get_default_statuses_ids(dataset)
status_dict = get_status_dict(dataset)
status_passed_fields = get_passed_fields(status_dict, statuses)
status_1_cols = [status_dict['Control'][x].column for x in statuses_ids['Control']]
status_1_label = ', '.join([status_dict['Control'][x].label for x in statuses_ids['Control']])
status_2_cols = [status_dict['Case'][x].column for x in statuses_ids['Case']]
status_2_label = ', '.join([status_dict['Case'][x].label for x in statuses_ids['Case']])
age_col = get_column_name(dataset, 'Age').replace(' ', '_')
sex_col = get_column_name(dataset, 'Sex').replace(' ', '_')
sex_dict = get_sex_dict(dataset)
continuous_vars = {'Age': age_col}
categorical_vars = {
status_col: [x.column for x in status_passed_fields],
sex_col: [sex_dict[x] for x in sex_dict]
}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas = pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
betas = betas_drop_na(betas)
df = | pd.merge(pheno, betas, left_index=True, right_index=True) | pandas.merge |
# data loading
__author__ = 'Guen'
import sys,os,glob,fnmatch,datetime,time
import configparser, logging
import numpy as np
import pandas as pd
import json
from .gutil import get_config
from PyQt4 import QtGui
import imp
config = get_config()
_DATA_FOLDER = config.get('Data','DataFolder')
if 'DATA_DIR' in os.environ.keys():
_DATA_FOLDER = os.environ['DATA_DIR']
_ANALYSIS_FOLDER = config.get('Analysis', 'AnalysisFolder')
sys.path.append(_ANALYSIS_FOLDER)
try:
_analysis_module = __import__(config.get('Analysis', 'AnalysisModule'))
except:
logging.warning("Analysis module error")
_analysis_module = None
def get_latest_stamp():
return max(glob.iglob('*.dat'), key=os.path.getctime)
def analyse(stamp, analysis_module = _analysis_module):
'''
Perform module_name.do_analyse function on d (pandas dataframe) and return result
'''
d = get(stamp)
    m = imp.reload(analysis_module)  # reload the module passed in (defaults to the configured analysis module)
return m.do_analyse(d)
def load():
'''
Open file dialog and load .dat or .csv
Return pandas dataframe with bonus attributes .filepath, .stamp and .meta (from meta/json file)
'''
if not QtGui.QApplication.instance():
QtGui.QApplication(sys.argv)
fileDialog = QtGui.QFileDialog()
filepath = fileDialog.getOpenFileName(directory = _DATA_FOLDER)
extension = filepath[-4:]
if '.dat' in filepath:
d = | pd.read_csv(filepath,sep='\t') | pandas.read_csv |
#!/usr/bin/env python
"""Tests for `openml_speed_dating_pipeline_steps` package."""
import unittest
from sklearn import datasets
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from openml_speed_dating_pipeline_steps import (
openml_speed_dating_pipeline_steps
as pipeline_steps
)
class TestOpenml_speed_dating_pipeline_steps(unittest.TestCase):
"""Tests for `openml_speed_dating_pipeline_steps` package."""
def setUp(self):
"""Set up test fixtures, if any."""
iris = datasets.load_iris()
self.data = pd.DataFrame(data=iris.data, columns=iris.feature_names)
self.range_col = iris.feature_names[0] + 'range'
self.range_orig = iris.feature_names[0]
self.data[self.range_col] = self.data[iris.feature_names[0]].apply(
lambda x: '[{}-{}]'.format(x, x+1)
)
self.numeric_difference = pipeline_steps.NumericDifferenceTransformer()
self.range_transformer = pipeline_steps.RangeTransformer()
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_numeric_difference_columns(self):
"""Test that numeric differences returns the
right number of columns."""
assert(len(
self.numeric_difference.fit_transform(self.data).columns
) == 6)
def test_001_numeric_difference_coltypes(self):
transformed = self.numeric_difference.fit_transform(self.data)
for col in transformed.columns:
assert is_numeric_dtype(transformed[col])
def test_002_range_columns(self):
"""Test that numeric differences returns the
right number of columns."""
assert(len(
self.range_transformer.fit_transform(
self.data[self.range_col]
).columns
) == 1)
def test_003_range_coltypes(self):
transformed = self.range_transformer.fit_transform(
self.data[self.range_col]
)
for col in transformed.columns:
assert | is_numeric_dtype(transformed[col]) | pandas.api.types.is_numeric_dtype |
#!/global/common/software/lsst/common/miniconda/current/envs/stack/bin/python
# set mode: which class from which to match the hosts
import sys
from sklearn.neighbors import NearestNeighbors
import numpy as np
from matplotlib import pyplot as plt
import os
import GCRCatalogs
from astropy.io import fits
import pandas as pd
from astropy.cosmology import Planck15 as P15
from astropy import units as u
import matplotlib
import time
import seaborn as sns
from itertools import product
from collections import Counter
print("Made it into the file!")
from sklearn.preprocessing import StandardScaler
import numpy.ma as ma
import multiprocessing as mp
import sys
#mode = sys.argv[1]
plotting = False
full = True
if full:
tot = 3000000
else:
tot = 5000
fn = '/global/cscratch1/sd/agaglian/GHOSTwithImageSizes.csv'
ghost = pd.read_csv(fn)
transient_class = ghost['TransientClass']
gMag_G = ghost['gKronMag_SDSS_abs']
gMag_R = ghost['rKronMag_SDSS_abs']
gMag_I = ghost['iKronMag_SDSS_abs']
gMag_Z = ghost['zKronMag_SDSS_abs']
g_rshift = ghost['NED_redshift']
g_rshift2 = ghost['TransientRedshift']
#g_ellip = ghost['r_ellip']
g_gr = ghost['g-r_SDSS_rest']
g_ri = ghost['r-i_SDSS_rest']
g_iz = ghost['i-z_SDSS_rest']
#g_Rkpc = np.sqrt(np.nanmean([ghost['gR_kpc']**2, ghost['rR_kpc']**2, ghost['iR_kpc']**2, ghost['zR_kpc']**2, ghost['yR_kpc']**2], axis=0)) # radius in kpc, averaged across bands
# keep track of indices from original file
og_ghost_idx = np.arange(len(ghost))
keydata = np.vstack((gMag_G, gMag_R, gMag_I, gMag_Z, g_gr, g_ri, g_iz, g_rshift, g_rshift2)).T #g_Rkpc,
# first remove all -999s:
keydata[np.logical_or(keydata<-50,keydata>100)] = np.nan
# get rid of redshifts with nan
delete_znans = []
z_nans = 0
for i in range(len(keydata)):
if np.isnan(keydata[i,7]):
z_nans += 1
for i in range(len(keydata)):
if np.isnan(keydata[i,7]):
# if transient redshift is not nan, replace with transient redshift
if not np.isnan(keydata[i,8]):
keydata[i,7] = keydata[i,8]
else:
delete_znans.append(i)
if keydata[i,7] <= 0:
delete_znans.append(i)
keydata = np.delete(keydata, delete_znans, axis=0)
og_ghost_idx = np.delete(og_ghost_idx, delete_znans)
delete_rows = []
# delete rows with more than one nan
for i in range(len(keydata)):
if np.isnan(np.sum(keydata[i])):
nan_counter = 0
for j in range(1, len(keydata[i])):
if np.isnan(keydata[i,j]):
nan_counter+=1
if nan_counter > 1:
delete_rows.append(i)
keydata = np.delete(keydata, delete_rows, axis=0)
og_ghost_idx = np.delete(og_ghost_idx, delete_rows)
# finally for rows with just one nan, replace with the average value
for i in range(len(keydata)):
if np.isnan(np.sum(keydata[i])):
for j in range(1, len(keydata[i])):
if np.isnan(keydata[i,j]):
keydata[i,j] = np.nanmean(keydata[:,j])
gG = keydata[:,0]
gR = keydata[:,1]
gI = keydata[:,2]
gZ = keydata[:,3]
g_gr = keydata[:,4]
g_ri = keydata[:,5]
g_iz = keydata[:,6]
#g_Rkpc = keydata[:,7]
g_rshift = keydata[:,7]
ghost_objIDs = ghost['objID'].values[og_ghost_idx]
# read in file of CosmoDC2 galaxies, with PZFlow SFR and redshifts, limited to abs r-band magnitude < -15
# and -0.18 < i-z < 0.5
if full:
cdc2 = pd.read_csv("/global/cscratch1/sd/agaglian/DC2full_pzRedshifts_twentyHealpix_sdss_updMag_Rkpc_Final.tar.gz", memory_map=True, low_memory=True)
else:
cdc2 = | pd.read_csv("/global/cscratch1/sd/mlokken/sn_hostenv/DC2_pzRedshifts_SFR_RMag_lt_neg15.csv", memory_map=True, low_memory=True) | pandas.read_csv |
"""
A collection of input/output tools for PyChamberFlux
(c) 2016-2017 <NAME> <<EMAIL>>
"""
import yaml
import glob
import warnings
import pandas as pd
# a collection of date parsers for timestamps stored in multiple columns
# This does not support month-first (American) or day-first (European) format,
# put them by the year-month-day order (ISO 8601) using index orders.
date_parsers_dict = {
# date only
'ymd': lambda s: pd.to_datetime(s, format='%Y %m %d'),
# down to minute
'ymdhm': lambda s: pd.to_datetime(s, format='%Y %m %d %H %M'),
# down to second
'ymdhms': lambda s: pd.to_datetime(s, format='%Y %m %d %H %M %S'),
# down to nanosecond
'ymdhmsf': lambda s: pd.to_datetime(s, format='%Y %m %d %H %M %S %f')
}
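# Illustrative example: a config entry such as ``date_parser: 'ymdhms'`` selects one of
# the parsers above, which is then applied to the timestamp columns listed in settings:
#     parser = date_parsers_dict['ymdhms']
#     parser(pd.Series(['2017 06 30 23 59 59']))  # -> Series with Timestamp('2017-06-30 23:59:59')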
def load_config(filepath):
"""Load YAML configuration file from a given filepath."""
with open(filepath, 'r') as stream:
try:
config = yaml.load(stream)
except yaml.YAMLError as exc_yaml:
print(exc_yaml)
config = {} # return a blank dict if fail to load
return config
def load_tabulated_data(data_name, config, query=None):
"""
A general function to read tabulated data (biometeorological,
concentration, flow rate, leaf area, and timelag data).
Parameters
----------
data_name : str
Data name, allowed values are
- 'biomet': biometeorological data
- 'conc': concentration data
- 'flow': flow rate data
- 'leaf': leaf area data
- 'timelag': timelag data
config : dict
Configuration dictionary parsed from the YAML config file.
query : list
List of the query strings used to search in all available data files.
If `None` (default), read all data files.
Return
------
df : pandas.DataFrame
The loaded tabulated data.
"""
# check the validity of `data_name` parameter
if data_name not in ['biomet', 'conc', 'flow', 'leaf', 'timelag']:
raise RuntimeError('Wrong data name. Allowed values are ' +
"'biomet', 'conc', 'flow', 'leaf', 'timelag'.")
# search file list
data_flist = glob.glob(config['data_dir'][data_name + '_data'])
data_flist = sorted(data_flist) # HOTFIX: must sort by name
# get the data settings
data_settings = config[data_name + '_data_settings']
if type(query) is str:
# `query` must be a list
query = [query]
if query is not None:
data_flist = [f for f in data_flist if any(q in f for q in query)]
data_flist = sorted(data_flist) # ensure the list is sorted by name
# check data existence
if not len(data_flist):
print('Cannot find the %s data file!' % data_name)
return None
else:
print('%d %s data files are found. ' % (len(data_flist), data_name) +
'Loading...')
# check date parser: if legit, extract it, and if not, set it to `None`
if data_settings['date_parser'] in date_parsers_dict:
date_parser = date_parsers_dict[data_settings['date_parser']]
else:
date_parser = None
read_csv_options = {
'sep': data_settings['delimiter'],
'header': data_settings['header'],
'names': data_settings['names'],
'usecols': data_settings['usecols'],
'dtype': data_settings['dtype'],
'na_values': data_settings['na_values'],
'parse_dates': data_settings['parse_dates'],
'date_parser': date_parser,
'infer_datetime_format': True,
'engine': 'c',
'encoding': 'utf-8', }
df_loaded = \
[pd.read_csv(entry, **read_csv_options) for entry in data_flist]
for entry in data_flist:
print(entry)
try:
df = pd.concat(df_loaded, ignore_index=True)
except ValueError:
# if the list to concatenate is empty
return None
del df_loaded
# parse 'doy' as 'time_doy'
if 'doy' in df.columns and 'time_doy' not in df.columns:
df.rename(columns={'doy': 'time_doy'}, inplace=True)
# parse 'datetime' as 'timestamp'
if 'datetime' in df.columns and 'timestamp' not in df.columns:
df.rename(columns={'datetime': 'timestamp'}, inplace=True)
# echo data status
print('%d lines read from %s data.' % (df.shape[0], data_name))
# the year number to which the day of year values are referenced
year_ref = config['%s_data_settings' % data_name]['year_ref']
# parse time variables if not already exist
if 'timestamp' in df.columns.values:
if type(df.loc[0, 'timestamp']) is not pd.Timestamp:
df['timestamp'] = pd.to_datetime(df['timestamp'], errors='coerce')
# note: no need to catch out-of-bound error if set 'coerce'
if 'time_doy' not in df.columns.values:
# add a time variable in days of year (float) if not already there
year_start = year_ref if year_ref is not None else \
df.loc[0, 'timestamp'].year
df['time_doy'] = (df['timestamp'] -
pd.Timestamp('%d-01-01' % year_start)) / \
pd.Timedelta(days=1)
elif 'time_doy' in df.columns.values:
# starting year must be specified for day of year
year_start = year_ref
df['timestamp'] = | pd.Timestamp('%d-01-01' % year_start) | pandas.Timestamp |
import os
import pickle
import numpy as np
import pandas as pd
import gzip
import fcsparser
# Load Kuzushiji Japanese Handwritten dataset
def load_kmnist(path, dtype="kmnist", kind='train'):
images_path = os.path.join(path, f'{dtype}-{kind}-imgs.npz')
labels_path = os.path.join(path, f'{dtype}-{kind}-labels.npz')
images = np.load(images_path)
images = images.f.arr_0
images = images.reshape(images.shape[0], -1)
labels = np.load(labels_path)
labels = labels.f.arr_0
labels = labels.reshape(-1)
return images, labels
# FASHION MNIST (60000+10000, 784), 26MB
def load_mnist(path, kind="train"): # train, t10k
"""Load MNIST data from `path`"""
labels_path = os.path.join(path, "%s-labels-idx1-ubyte.gz" % kind)
images_path = os.path.join(path, "%s-images-idx3-ubyte.gz" % kind)
with gzip.open(labels_path, "rb") as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
with gzip.open(images_path, "rb") as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(
len(labels), 784
)
return images, labels
# CIFAR 10 (50000+10000, 3072), 163MB
def load_pickle(f):
return pickle.load(f, encoding="latin1")
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, "rb") as f:
datadict = load_pickle(f)
X = datadict["data"]
Y = datadict["labels"]
X = X.reshape(10000, 3072)
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for b in range(1, 6):
f = os.path.join(ROOT, "data_batch_%d" % (b,))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, "test_batch"))
return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(cifar10_dir):
# Load the raw CIFAR-10 data
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
x_train = X_train.astype("float32")
x_test = X_test.astype("float32")
x_train /= 255
x_test /= 255
return x_train, y_train, x_test, y_test
def get_flow_data(ROOT):
fcs_data = fcsparser.parse(os.path.join(ROOT, "pbmc_luca.fcs"))
raw_data = fcs_data[1]
selected_columns = [col for col in raw_data.columns if col.endswith("-A")] + ['Time']
x = np.arcsinh(raw_data[selected_columns].values / 150.0).astype(np.float32, order='C')
return x
def get_data(dname, n_samples=None):
if dname == "spheres":
path = os.path.join(os.getcwd(), "data", "spheres")
df = pd.read_csv(os.path.join(path, 'spheres.csv')) # load data
x = df.drop(columns=['label']).to_numpy()
label = df['label'].to_numpy()
return x, label
elif dname == "allen":
path = os.path.join(os.getcwd(), "data", "allen")
df = pd.read_csv(os.path.join(path, 'allen.csv')) # load data
x = df.drop(columns=['label']).to_numpy()
label = df['label'].to_numpy()
return x, label
elif dname == "spheres_small":
path = os.path.join(os.getcwd(), "data", "spheres")
df = pd.read_csv(os.path.join(path, 'spheres_small.csv')) # load data
x = df.drop(columns=['label']).to_numpy()
label = df['label'].to_numpy()
return x, label
elif dname == "mnist":
path = os.path.join(os.getcwd(), "data", "MNIST", "raw")
return load_mnist(path=path, kind="train") # kind="t10k"
elif dname == "fmnist":
path = os.path.join(os.getcwd(), "data", "FashionMNIST", "raw")
return load_mnist(path=path, kind="train") # kind="t10k"
elif dname == "kmnist":
path = os.path.join(os.getcwd(), "data", "KuzushijiMNIST", "raw")
return load_kmnist(path=path, kind="train") # kind="t10k"
elif dname == "cifar10":
path = os.path.join(os.getcwd(), "data", "cifar-10-batches-py")
x, label, _, _ = get_CIFAR10_data(path)
return x, label
elif dname == "flow":
path = os.path.join(os.getcwd(), "data", "flow", "raw")
x = get_flow_data(path)
return x, np.arange(x.shape[0])
elif dname == "swissroll":
from sklearn import datasets
x, label = datasets.make_swiss_roll(n_samples=n_samples)
return x, label
elif dname == "scurve":
from sklearn import datasets
x, label = datasets.make_s_curve(n_samples=n_samples)
return x, label
elif dname == "single-cell":
path = os.path.join(os.getcwd(), "data", "single-cell")
data_path = os.path.join(path, "sc_10x.count.csv")
label_path = os.path.join(path, "sc_10x.metadata.csv")
x = pd.read_csv(data_path)
x = np.asarray(x)
x = np.swapaxes(x, 0, 1)
labels = pd.read_csv(label_path)
labels = labels['cell_line_demuxlet']
labels = np.asarray(labels)
label_uniq = list(set(labels))
label_uniq.sort()
for i, label in enumerate(labels):
if label == label_uniq[0]:
labels[i] = 0
elif label == label_uniq[1]:
labels[i] = 1
else:
labels[i] = 2
return x, labels
elif dname == "single-cell2":
path = os.path.join(os.getcwd(), "data", "single-cell")
data_path = os.path.join(path, "sc_10x_5cl.count.csv")
label_path = os.path.join(path, "sc_10x_5cl.metadata.csv")
x = | pd.read_csv(data_path) | pandas.read_csv |
import pandas as pd
from collections import Counter
import os
import numpy as np
import socket
np.random.seed(2017)
RAW_DATA = '../raw_data'
RATINGS_FILE = os.path.join(RAW_DATA, 'ml-100k/u.data')
RATINGS = pd.read_csv(RATINGS_FILE, sep='\t', header=None)
USERS_FILE = os.path.join(RAW_DATA, 'ml-100k/u.user')
USERS = pd.read_csv(USERS_FILE, sep='|', header=None)
ITEMS_FILE = os.path.join(RAW_DATA, 'ml-100k/u.item')
ITEMS = pd.read_csv(ITEMS_FILE, sep='|', header=None, encoding="ISO-8859-1")
OUT_DIR = '../dataset/'
def format_user_file(user_df):
formatted = user_df[[0, 1, 2, 3]].copy()
min_age, max_age = 15, 55
formatted[1] = formatted[1].apply(lambda x: max_age if x > max_age else x)
formatted[1] = formatted[1].apply(lambda x: min_age if x < min_age else x)
formatted[1] = formatted[1].apply(lambda x: max_age / 5 if x >= max_age else min_age / 5 if x <= min_age else x / 5)
# print Counter(formatted[1])
formatted[1] = formatted[1].apply(lambda x: int(x - formatted[1].min()))
formatted[2] = formatted[2].apply(lambda x: {'M': 0, 'F': 1}[x])
occupation = dict(
[(o.strip(), i) for i, o in enumerate(open(os.path.join(RAW_DATA, 'ml-100k/u.occupation'), 'r').readlines())])
formatted[3] = formatted[3].apply(lambda x: occupation[x])
formatted = formatted.fillna(-1)
formatted.columns = ['uid', 'u_age', 'u_gender', 'u_occupation']
# print formatted
# print formatted.info()
return formatted
def format_item_file(item_df):
formatted = item_df.drop([1, 3, 4], axis=1).copy()
formatted.columns = range(len(formatted.columns))
formatted[1] = formatted[1].apply(lambda x: int(str(x).split('-')[-1]) if pd.notnull(x) else -1)
min_year = 1989
formatted[1] = formatted[1].apply(lambda x: min_year if 0 < x < min_year else x)
formatted[1] = formatted[1].apply(lambda x: min_year + 1 if min_year < x < min_year + 4 else x)
years = dict([(year, i) for i, year in enumerate(sorted(Counter(formatted[1]).keys()))])
formatted[1] = formatted[1].apply(lambda x: years[x])
formatted.columns = ['iid', 'i_year',
'i_Action', 'i_Adventure', 'i_Animation', "i_Children's", 'i_Comedy',
'i_Crime', 'i_Documentary ', 'i_Drama ', 'i_Fantasy ', 'i_Film-Noir ',
'i_Horror ', 'i_Musical ', 'i_Mystery ', 'i_Romance ', 'i_Sci-Fi ',
'i_Thriller ', 'i_War ', 'i_Western', 'i_Other']
# print Counter(formatted[1])
# print formatted
# print formatted.info()
return formatted
def format_rating(ratings, users, items):
ratings = ratings.drop(3, axis=1).copy()
ratings.columns = ['uid', 'iid', 'rating']
ratings = pd.merge(ratings, users, on='uid', how='left')
ratings = pd.merge(ratings, items, on='iid', how='left')
# print ratings
return ratings
def random_split_data():
dir_name = 'ml-100k-r'
if not os.path.exists(os.path.join(OUT_DIR, dir_name)):
os.mkdir(os.path.join(OUT_DIR, dir_name))
users = format_user_file(USERS)
users.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.users.csv'), index=False)
items = format_item_file(ITEMS)
items.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.items.csv'), index=False)
all_data = format_rating(RATINGS, users, items)
all_data.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.all.csv'), index=False)
all_data = all_data.sample(frac=1).reset_index(drop=True)
train_size = int(len(all_data) * 0.8)
validation_size = int(len(all_data) * 0.1)
train_set = all_data[:train_size]
validation_set = all_data[train_size:train_size + validation_size]
test_set = all_data[train_size + validation_size:]
# print train_set
# print validation_set
# print test_set
train_set.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.train.csv'), index=False)
validation_set.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.validation.csv'), index=False)
test_set.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.test.csv'), index=False)
def split_cold_ui(item_cold_ratio=0.0, user_cold_ratio=0.0, vt_ratio=0.1, suffix=''):
dir_name = 'ml-100k'
if item_cold_ratio > 0:
dir_name += '-i%d' % int(item_cold_ratio * 100)
if user_cold_ratio > 0:
dir_name += '-u%d' % int(user_cold_ratio * 100)
dir_name += suffix
if not os.path.exists(os.path.join(OUT_DIR, dir_name)):
os.mkdir(os.path.join(OUT_DIR, dir_name))
users = format_user_file(USERS)
users.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.users.csv'), index=False)
items = format_item_file(ITEMS)
items.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.items.csv'), index=False)
all_data = format_rating(RATINGS, users, items)
all_data.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.all.csv'), index=False)
remain = all_data.copy()
validation_size = int(len(remain) * vt_ratio)
test_size = int(len(remain) * vt_ratio)
validation_index = []
test_index = []
remain_index = []
cold_item_index, cold_user_index = [], []
if item_cold_ratio > 0:
iid_list = remain.iid.unique().tolist()
np.random.shuffle(iid_list)
for iid in iid_list:
iid_indexes = remain[remain.iid == iid].index.tolist()
if len(cold_item_index) + len(iid_indexes) <= int(2 * validation_size * item_cold_ratio):
cold_item_index.extend(iid_indexes)
remain = remain.drop(iid_indexes)
if len(cold_item_index) + len(iid_indexes) == int(2 * validation_size * item_cold_ratio):
break
        cold_item_num = len(cold_item_index) // 2  # integer division so it can be used as a slice index
np.random.shuffle(cold_item_index)
validation_index.extend(cold_item_index[:cold_item_num])
test_index.extend(cold_item_index[cold_item_num:])
if user_cold_ratio > 0:
uid_list = remain.uid.unique().tolist()
np.random.shuffle(uid_list)
for uid in uid_list:
uid_indexes = remain[remain.uid == uid].index.tolist()
if len(cold_user_index) + len(uid_indexes) <= int(2 * validation_size * user_cold_ratio):
cold_user_index.extend(uid_indexes)
remain = remain.drop(uid_indexes)
if len(cold_user_index) + len(uid_indexes) == int(2 * validation_size * user_cold_ratio):
break
        cold_user_num = len(cold_user_index) // 2  # integer division so it can be used as a slice index
np.random.shuffle(cold_user_index)
validation_index.extend(cold_user_index[:cold_user_num])
test_index.extend(cold_user_index[cold_user_num:])
remain_uid_index = []
for uid, group in remain.groupby('uid'):
remain_uid_index.extend(group.sample(1).index.tolist())
remain_index.extend(remain_uid_index)
remain = remain.drop(remain_uid_index)
remain_iid_index = []
for iid, group in remain.groupby('iid'):
remain_iid_index.extend(group.sample(1).index.tolist())
remain_index.extend(remain_iid_index)
remain = remain.drop(remain_iid_index)
sample_index = remain.sample(validation_size - len(validation_index)).index.tolist()
validation_index.extend(sample_index)
remain = remain.drop(sample_index)
sample_index = remain.sample(test_size - len(test_index)).index.tolist()
test_index.extend(sample_index)
remain = remain.drop(sample_index)
remain_index.extend(remain.index.tolist())
validation_set = all_data.iloc[validation_index]
test_set = all_data.iloc[test_index]
train_set = all_data.iloc[remain_index]
# print validation_set
# print test_set
# print train_set
# train_set.sample(frac=1).to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.train.csv'), index=False)
# validation_set.sample(frac=1).to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.validation.csv'),
# index=False)
# test_set.sample(frac=1).to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.test.csv'), index=False)
train_set.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.train.csv'), index=False)
validation_set.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.validation.csv'),
index=False)
test_set.to_csv(os.path.join(OUT_DIR, dir_name + '/' + dir_name + '.test.csv'), index=False)
print(len(set(validation_index + test_index + remain_index)))
def change_id(item_cold_ratio=0.0, user_cold_ratio=0.0, prefix='ml-100k-ci', suffix=''):
if item_cold_ratio == 0.0 and user_cold_ratio == 0.0:
split_cold_ui(0.0, 0.0, 0.1, suffix='-ci' + suffix)
return
dir_name = prefix
if item_cold_ratio > 0:
dir_name += '-i%d' % int(item_cold_ratio * 100)
if user_cold_ratio > 0:
dir_name += '-u%d' % int(user_cold_ratio * 100)
dir_name += suffix
if not os.path.exists(os.path.join(OUT_DIR, dir_name)):
os.mkdir(os.path.join(OUT_DIR, dir_name))
train_set = pd.read_csv(os.path.join(OUT_DIR, prefix + '/' + prefix + '.train.csv'))
validation_set = pd.read_csv(os.path.join(OUT_DIR, prefix + '/' + prefix + '.validation.csv'))
test_set = pd.read_csv(os.path.join(OUT_DIR, prefix + '/' + prefix + '.test.csv'))
users = pd.read_csv(os.path.join(OUT_DIR, prefix + '/' + prefix + '.users.csv'))
items = pd.read_csv(os.path.join(OUT_DIR, prefix + '/' + prefix + '.items.csv'))
all_data = pd.read_csv(os.path.join(OUT_DIR, prefix + '/' + prefix + '.all.csv'))
validation_cold_index, test_cold_index = [], []
if item_cold_ratio > 0:
i_columns = [c for c in validation_set.columns if c.startswith('i')]
cold_size = int(len(validation_set) * item_cold_ratio)
validation_set = validation_set.sample(frac=1.0)
max_iid = items['iid'].max()
validation_set['iid'][0:cold_size] = range(max_iid + 1, max_iid + cold_size + 1)
# print validation_set
new_items = validation_set[i_columns][0:cold_size]
items = | pd.concat([items, new_items]) | pandas.concat |
import pandas as pd
from typing import Tuple
from azfs import export_decorator
@export_decorator.register()
def example_1(name: str, b: int, c: str) -> pd.DataFrame:
return pd.DataFrame()
@export_decorator.register()
def example_2() -> Tuple[pd.DataFrame, pd.DataFrame]:
return | pd.DataFrame() | pandas.DataFrame |
"""
Contains methods for making choices.
"""
import numpy as np
import pandas as pd
from patsy import dmatrix
from .wrangling import broadcast, explode
from .sampling import get_probs, get_segmented_probs, randomize_probs, sample2d
def binary_choice(p, t=None):
"""
Performs a binary choice from a series of probabilities.
    Parameters:
    -----------
p: pandas.Series
Series of probabilities.
t: numeric or array-like
Threshold value to test against. If not provided
a random number will be generated.
Returns:
--------
boolean pandas.Series
"""
if t is None:
t = np.random.rand(len(p))
return p > t
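# Minimal usage sketch (hypothetical probabilities, not project data):
#     p = pd.Series([0.1, 0.5, 0.9])
#     binary_choice(p)       # random threshold drawn for each element
#     binary_choice(p, 0.5)  # deterministic: [False, False, True]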
def rate_based_binary_choice(rates, rate_col, agents, segment_cols, set_rate_index=True):
"""
Performs a binary choice using a segmented rates table.
The rates imply probabilities and should range from 0 - 1.
Parameters:
-----------
rates: pandas.DataFrame
Data frame containing rates to use as probabilities.
rates_col: string
Column in rates table containing rates/probabilities.
agents: pandas.DataFrame
Data frame containing agents to choose from.
segment_cols: string
List of columns names to link rates to agents.
set_rate_index: bool, optional default True
If true, sets the index on the rates to match the segments.
Returns:
--------
boolean pandas.Series
"""
r = rates
if set_rate_index:
r = rates.set_index(segment_cols)
p = broadcast(r[rate_col], agents, segment_cols)
p.fillna(0)
return binary_choice(p)
def logit_binary_choice(coeff, data):
"""
Performs a binary choice using a logit model.
Parameters:
-----------
coeff: pandas.Series
Series containing coefficients. Index is the variable
name, the value the coefficient.
data: pandas.DataFrame
Table containing data to choose from. Should have
columns for all the coefficents.
SCOTT TODO: how to best allow custom functions in the dmatrix
evaluation?? Need to figure out how to import these.
Returns:
--------
u - pandas.Series of utilities
p - pandas.Series of probabilities
c - pandas.Series of boolean choices
"""
# get the design matrix
if 'intercept' not in data.columns:
data['intercept'] = 1 # should I be copying this first?
coeff_cols = list(coeff.index.values)
model_design = dmatrix(data[coeff_cols], return_type='dataframe')
# get utilties and probabilities
u = np.exp(np.dot(model_design.values, coeff.values.T))
p = u / (1 + u)
# make the choice and return the results
return u, p, binary_choice(p)
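# Minimal usage sketch (coefficient names and values are made up for illustration):
#     coeff = pd.Series({'intercept': -1.0, 'income': 0.5})
#     data = pd.DataFrame({'income': [0.0, 2.0, 4.0]})
#     u, p, chosen = logit_binary_choice(coeff, data)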
def weighted_choice(agents, alternatives, w_col=None, cap_col=None, return_probs=False):
"""
    Makes choices based on weights previously assigned to the alternatives.
Parameters:
-----------
agents: pandas.DataFrame or pandas.Series
Agents to make choices.
alternatives: pandas.DataFrame
Choice set of alternatives.
w_col: string, optional, default None.
Column to serve as weights for the choice set.
cap_col: string
Column to serve as capacities for the choice set.
return_probs: bool, optional, default False
If True, probabilities will also be returned.
Returns:
--------
pandas.Series of the chosen indexes, aligned to the agents.
"""
probs = None
if cap_col is None:
# unconstrained choice
if w_col is not None:
probs = get_probs(alternatives[w_col]).values
choice_idx = np.random.choice(alternatives.index.values, len(agents), p=probs)
else:
        # capacity limited choice
if w_col is None:
e = explode(alternatives[[cap_col]], cap_col, 'old_idx')
choice_idx = np.random.choice(e['old_idx'].values, len(agents), replace=False)
else:
# make sure we have enough
if len(agents) > alternatives[cap_col].sum():
raise ValueError('Not enough capacity for agents')
# get a row for each unit of capacity
e = explode(alternatives[[w_col, cap_col]], cap_col, 'old_idx')
# make the choice
probs = get_probs(e[w_col] / e[cap_col])
choice_idx = np.random.choice(
e['old_idx'].values, len(agents), p=probs.values, replace=False)
# return the results
choices = pd.Series(choice_idx, index=agents.index)
if return_probs:
return choices, probs # SCOTT, need to add a test for this
else:
return choices
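# Minimal usage sketch (toy agents/alternatives, illustrative only):
#     agents = pd.DataFrame(index=range(3))
#     alts = pd.DataFrame({'w': [1, 2, 7], 'cap': [1, 1, 2]})
#     picks = weighted_choice(agents, alts, w_col='w', cap_col='cap')
#     # picks is a Series of chosen alternative indexes aligned to the agents' index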
def get_interaction_data(choosers, alternatives, sample_size, sample_replace=True):
"""
Returns an interaction dataset with attributes of both choosers and alternatives,
with the number of alternatives per chooser defined by a sample size.
choosers: pandas.DataFrame
Data frame of agents making choices.
alternatives: pandas.DataFrame
Data frame of alternatives to choose from.
sample_size: int, optional, default 50
Number of alternatives to sample for each agent.
sample_replace: bool, optional, default True
If True, sampled alternatives and choices can be shared across multiple choosers,
If False, this will generate a non-overlapping choiceset.
Returns:
--------
interaction_data: pandas.DataFrame
Data frame with 1 row for each chooser and sampled alternative. Index is a
multi-index with level 0 containing the chooser IDs and level 1 containing
the alternative IDs.
sample_size: int
Sample size used in the sample. This may be smaller than the provided sample
size if the number of alternatives is less than the desired sample size.
"""
num_alts = len(alternatives)
num_choosers = len(choosers)
# sample from the alternatives
if sample_replace:
# allow the same alternative to be sampled across agents
sample_size = min(sample_size, num_alts)
sampled_alt_idx = sample2d(alternatives.index.values, num_choosers, sample_size).ravel()
else:
# non-overlapping choice-set
if num_alts < num_choosers:
raise ValueError("Less alternatives than choosers!")
        sample_size = min(sample_size, num_alts // num_choosers)  # integer division keeps the sample size an int
sampled_alt_idx = np.random.choice(
alternatives.index.values, sample_size * num_choosers, replace=False)
# align samples to match choosers
sampled_alts = alternatives.reindex(sampled_alt_idx)
alt_idx_name = sampled_alts.index.name
if alt_idx_name is None:
alt_idx_name = 'alternative_id'
sampled_alts.index.name = alt_idx_name
sampled_alts.reset_index(inplace=True)
# link choosers w/ sampled alternatives
choosers_r = choosers.reindex(choosers.index.repeat(sample_size))
chooser_idx_name = choosers_r.index.name
if chooser_idx_name is None:
chooser_idx_name = 'chooser_id'
choosers_r.index.name = chooser_idx_name
sampled_alts.index = choosers_r.index
interaction_data = pd.concat([choosers_r, sampled_alts], axis=1)
interaction_data.set_index(alt_idx_name, append=True, inplace=True)
return interaction_data, sample_size
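# Illustrative sketch: each chooser is paired with its sampled alternatives so that
# attributes from both sides are available row-wise in the interaction table:
#     pairs, n = get_interaction_data(agents, alts, sample_size=2)
#     # pairs has len(agents) * n rows and a (chooser id, alternative id) MultiIndex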
def choice_with_sampling(choosers,
alternatives,
probs_callback,
sample_size=50,
sample_replace=True,
verbose=False,
**prob_kwargs):
"""
Performs a weighted choice while sampling alternatives. Supports
attributes on both the chooser and the alternatives.
Parameters:
-----------
choosers: pandas.DataFrame
Data frame of agents making choices.
alternatives: pandas.DataFrame
Data frame of alternatives to choose from.
probs_callback: function
- Function used to generate probabilities
from the sampled interaction data.
- Should return a numpy matrix with the shape
(number of choosers, sample size).
- The probabilities for each row must sum to 1.
- The following arguments will be passed in to the callback:
- interaction_data
- num_choosers
- sample_size
- additional keyword args (see **prob_kwargs)
sample_size: int, optional, default 50
Number of alternatives to sample for each agent.
sample_replace: bool, optional, default True
If True, sampled alternatives and choices can be shared across multiple choosers,
If False, this will generate a non-overlapping choiceset.
verbose: bool, optional, default False
If true, an additional data frame is returned containing
the choice matrix. This has the columns:
- chooser_id: index of the chooser
- alternative_id: index of the alternative
- prob: the probability
**prob_kwargs:
Additional key word arguments to pass to the probabilities
callback.
Returns:
--------
- pandas.DataFrame of the choices, indexed to the chooses, with columns:
- alternative_id: index of the chosen alternative
- prob: probability of the chosen alternative
- optionally, data frame of all samples (see verbose parameter above)
"""
num_choosers = len(choosers)
# get sampled interaction data
interaction_data, sample_size = get_interaction_data(
choosers, alternatives, sample_size, sample_replace)
chooser_idx = interaction_data.index.get_level_values(0).values
alt_idx = interaction_data.index.get_level_values(1).values
# assign weights/probabiltities to the alternatives
# the result should a 2d numpy array with dim num choosers (rows) X num alts (cols)
probs = probs_callback(
interaction_data=interaction_data,
num_choosers=num_choosers,
sample_size=sample_size,
**prob_kwargs)
assert probs.shape == (num_choosers, sample_size)
assert round(probs.sum(), 0) == num_choosers # fix this per Jacob's suggestion?
# make choices for each agent
cs = np.cumsum(probs, axis=1)
r = np.random.rand(num_choosers).reshape(num_choosers, 1)
chosen_rel_idx = np.argmax(r < cs, axis=1)
chosen_abs_idx = chosen_rel_idx + (np.arange(num_choosers) * sample_size)
curr_choices = pd.DataFrame(
{
'alternative_id': alt_idx[chosen_abs_idx],
'prob': probs.ravel()[chosen_abs_idx],
},
index= | pd.Index(choosers.index) | pandas.Index |
# author: <NAME>, <NAME>
# date: 2020-11-28
"""
Fits a Ridge model on the pre-processed training data from the UCI Abalone Data Set
(from https://archive.ics.uci.edu/ml/datasets/abalone).
Saves the model as a sav file.
Usage: src/ML/abalone_fit_predict_model.py --train=<train> --out_dir=<out_dir>
Options:
--train=<train> Path (including filename) to training data (csv file)
--out_dir=<out_dir> Path to directory where the serialized model should be written
"""
from docopt import docopt
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV
from sklearn.compose import make_column_transformer
from sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
opt = docopt(__doc__)
def main(input_file, out_dir):
"""run all helper functions to find the best model and get the
hyperparameter tuning result
Parameters
----------
input_file : string
the path (including file name) to the training dataset
out_dir : string
the path to store the results
"""
train_df = pd.read_csv(input_file)
best_ridge, result_df = find_best_model(train_df)
# write the serialized best model into a sav file
pickle.dump(best_ridge, open(out_dir + "/best_predict_model.sav", "wb"))
# save the hyperparameter tuning plot
plot_save(result_df, out_dir + "/hyperparam_tuning.png")
def find_best_model(train_df):
"""find the best model `Ridge` which is a machine learning (ML) linear
regression model
Parameters
----------
input_file : string
the path (including file name) to the training dataset
Returns
-------
best_ridge, result_df : tuple
a tuple that contains the best ridge model object and the tuning
result dataframe
"""
train_df = train_df[train_df["Height"] < 0.6]
X_train, y_train = train_df.drop(columns=["Age"]), train_df["Age"]
# construct a ML pipeline
pipe = get_pipeline()
# tune the hyperparameter alpha using RandomizedSearchCV
param_dist = {"ridge__alpha": 2.0 ** np.arange(-10, 10, 1)}
random_search = RandomizedSearchCV(
pipe,
param_distributions=param_dist,
n_jobs=-1,
n_iter=10,
cv=5,
scoring="r2",
random_state=2020,
)
random_search.fit(X_train, y_train)
best_ridge = random_search.best_estimator_
result_df = (
| pd.DataFrame(random_search.cv_results_) | pandas.DataFrame |
# Dependencies
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter('ignore', UserWarning)
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import sys
import argparse
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import itertools
from scipy import stats
from sklearn.metrics import auc, accuracy_score, roc_curve, precision_score, recall_score, f1_score, roc_auc_score
from lightgbm import LGBMClassifier
import lightgbm as lgb
import matplotlib.gridspec as gridspec
import seaborn as sns
import pylab as plot
import pandas
def display_distributions(actual_imp_df_, null_imp_df_, feature_):
plt.figure(figsize=(13, 6))
gs = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0, 0])
fig = plt.gcf()
fig.set_size_inches(8, 4)
params = {'legend.fontsize': 14, 'legend.handlelength': 2}
plot.rcParams.update(params)
null_imp_df_.loc[null_imp_df_['feature'] == feature_, 'importance_gain'].plot.kde(ax=ax, legend=True, label='Null distribution')
plt.axvline(actual_imp_df_.loc[actual_imp_df_['feature'] == feature_, 'importance_gain'].mean(), 0, np.max(null_imp_df_.loc[null_imp_df_['feature'] == feature_, 'importance_gain'].values), color='r', label='Observed importance')
ax.legend(loc=1)
plt.xlabel('Importance score', fontsize=14)
plt.ylabel('Density', fontsize=14)
plt.tight_layout()
plt.savefig(feature_ + "_importance_plot.svg")
plt.savefig(feature_ + "_importance_plot.png")
plt.show()
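# Illustrative usage note: both dataframes passed to display_distributions are
# expected to hold one row per (feature, run) with at least the columns
# 'feature' and 'importance_gain'; the null importances are typically gathered
# from models refit on permuted labels, e.g.
#   display_distributions(actual_imp_df, null_imp_df, feature_='expression')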
def parse_args():
parser = argparse.ArgumentParser(description = "", epilog = "")
parser.add_argument("-df", "--dataFolder", help="Path to where the training data (TCGA, DepMap, Embedding) is stored (REQUIRED).", dest="dataFolder")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
available_samples = ["s1","s2","s3","s4","s5","s6","s7","s8","s9","s10"]
cancer_type_list = ["liver","breast","bladder", "colon", "ovarian", "kidney", "leukemia","pancreatic","lung"]
orderFeatures = ["essentiality","mutation","expression", "e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "e10", "e11", "e12", "e13", "e14", "e15", "e16", "e17", "e18", "e19", "e20", "e21", "e22", "e23", "e24", "e25", "e26", "e27", "e28", "e29", "e30", "e31"]
for cancer_type in cancer_type_list:
cancerCorr = pd.DataFrame()
for inx, sampleNumber in enumerate(available_samples):
# Load dataset
data = pandas.read_csv(args.dataFolder + cancer_type.capitalize() + "/" + cancer_type + "_training_data_" + sampleNumber + ".dat", header=0, sep=",")
data.drop("gene", axis=1, inplace=True)
data = data[data['label'] != 2]
dataframePositive = data[data['label'] == 1]
dataframeNegative = data[data['label'] == 0]
positiveSize = dataframePositive.shape[0]
negativeSize = dataframeNegative.shape[0]
# Set them the same size
if(positiveSize > negativeSize):
dataframePositive = dataframePositive.head(-(positiveSize-negativeSize))
elif(negativeSize > positiveSize):
dataframeNegative = dataframeNegative.head(-(negativeSize-positiveSize))
            data = pd.concat([dataframePositive, dataframeNegative])
categorical_feats = [
f for f in data.columns if data[f].dtype == 'object'
]
categorical_feats
for f_ in categorical_feats:
data[f_], _ = | pandas.factorize(data[f_]) | pandas.factorize |
import time
import pandas as pd
import os
from hungarian import *
from collections import deque
import multiprocessing as mp
import numpy as np
import sys
sys.setrecursionlimit(1000000)
sys.getrecursionlimit()
class TreeNode: # This is the node for tree serch
def __init__(self, builidng_id, location_id, tree_vec, parent):
# self.tree_node_id = None
self.building_id = int(builidng_id)
self.location_id = int(location_id)
self.parent = parent
self.lower_value = None
self.upper_value = None
self.tree_vec = tree_vec # tree vector indicates at the tree nodes the locations are assigned to which building
self.assignment_mat_1 = None
self.assignment_mat_2 = None
def GLB_(assignment_mat):
location_ind, building_ind = linear_sum_assignment(assignment_mat)
value = assignment_mat[location_ind, building_ind].sum() # + M # As this is the symmetric case, the i,k of each branch must be chosen in its corresponding l,j
return {'building_ind': building_ind, 'location_ind': location_ind, 'value': value}
branch_list = []
GLB_cost_mat = np.zeros((1,1))
def init(branch_list_, GLB_cost_mat_):
global branch_list
global GLB_cost_mat
branch_list = branch_list_
GLB_cost_mat = GLB_cost_mat_
def Branch_update(multiplier_mat, tmp_value):
for branch in branch_list:
branch.assignment_mat = branch.init_assignment_mat - multiplier_mat
solution = GLB_(branch.assignment_mat)
branch.location_ind = solution['location_ind']
branch.building_ind = solution['building_ind']
branch.lower_value = solution['value'] + tmp_value
GLB_cost_mat[branch.i_ind - 1][branch.k_ind - 1] = branch.lower_value
class BAB:
def __init__(self, instance, glbsolver, args, cwd):
self.instance = instance
self.LB, self.UB = glbsolver.LB, glbsolver.UB
self.args = args
self.bf_lower_bound_list = [self.LB]
self.bf_upper_bound_list = [self.UB]
self.lb_lower_bound_list = [self.LB]
self.lb_upper_bound_list = [self.UB]
self.tree_node_list=[0]
self.current_layer_nodes = []
self.branch_iter = 0
self.best_solution_1 = None
self.best_solution_2 = None
self.random_i1_list = []
self.random_i2_list = []
self.nb_local = 0
# for quick access
self.target_relative_gap = args['target_relative_gap']
self.max_branch_iters = args['max_branch_iters']
self.M = args['M']
self.time_limit = args['time_limit']
self.start_time_breadth = 0.0
self.valid_time_breadth = 0.0
self.start_time_lb = 0.0
self.valid_time_lb = 0.0
self.nb_of_orig_building = instance.nb_of_orig_building
self.nb_of_orig_location = instance.nb_of_orig_location
self.nb_of_dest_building = instance.nb_of_dest_building
self.nb_of_dest_location = instance.nb_of_dest_location
self.flow_mat = instance.flow_mat
self.trans_cost_mat = instance.trans_cost_mat
self.build_cost_orig_mat = instance.build_cost_orig_mat
self.build_cost_dest_mat = instance.build_cost_dest_mat
self.pathfile=cwd
def local_search(self, tree_node):
assignment_mat_1, assignment_mat_2 = tree_node.assignment_mat_1, tree_node.assignment_mat_2
UpperBound = np.sum(self.flow_mat * np.matmul(np.matmul(assignment_mat_1, self.trans_cost_mat), assignment_mat_2)) + \
np.sum(self.build_cost_orig_mat * assignment_mat_1) + np.sum(self.build_cost_dest_mat * assignment_mat_2)
tree_node.upper_value = UpperBound
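        # NOTE: this early return skips the swap-based local search below, so
        # the remainder of this method is currently unreachable.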
return
local_search_list = deque()
local_search_list.append(assignment_mat_1)
Flag_Swap = 1
while (len(local_search_list) != 0 and Flag_Swap <= 10000):
temp_assign_mat = local_search_list[0]
assignment_mat_tmp = local_search_list[0]
for i in range(self.nb_local):
temp_assign_mat = local_search_list[0]
if self.random_i1_list[i] != self.random_i2_list[i]:
temp_assign_mat[[self.random_i1_list[i], self.random_i2_list[i]], :] = temp_assign_mat[[self.random_i2_list[i], self.random_i1_list[i]],:]
tmp_UB = np.sum(self.flow_mat * np.matmul(np.matmul(temp_assign_mat, self.trans_cost_mat), assignment_mat_2)) + \
np.sum(self.build_cost_orig_mat * temp_assign_mat) + np.sum(self.build_cost_dest_mat * assignment_mat_2)
if tmp_UB < UpperBound:
# print(UpperBound)
UpperBound = tmp_UB
assignment_mat_tmp = temp_assign_mat
local_search_list.append(assignment_mat_tmp)
local_search_list.popleft()
Flag_Swap = Flag_Swap + 1
assignment_mat_1 = assignment_mat_tmp
local_search_list = deque()
local_search_list.append(assignment_mat_2)
while (len(local_search_list) != 0 and Flag_Swap <= 20000):
temp_assign_mat = local_search_list[0]
assignment_mat_tmp = local_search_list[0]
for i in range(self.nb_local):
temp_assign_mat = local_search_list[0]
if self.random_i1_list[i] != self.random_i2_list[i]:
temp_assign_mat[[self.random_i1_list[i], self.random_i2_list[i]], :] = temp_assign_mat[[self.random_i2_list[i], self.random_i1_list[i]],:]
tmp_UB = np.sum(self.flow_mat * np.matmul(np.matmul(assignment_mat_1, self.trans_cost_mat),temp_assign_mat.T)) + \
np.sum(self.build_cost_orig_mat * assignment_mat_1) + np.sum(self.build_cost_dest_mat * temp_assign_mat)
if tmp_UB < UpperBound:
UpperBound = tmp_UB
assignment_mat_tmp = temp_assign_mat
local_search_list.append(assignment_mat_tmp)
local_search_list.popleft()
Flag_Swap += 1
assignment_mat_2 = assignment_mat_tmp
tree_node.upper_value = UpperBound
tree_node.assignment_mat_1, tree_node.assignment_mat_2 = assignment_mat_1, assignment_mat_2
def solveNode(self, live_node):
tree_nodes = []
live_building_id = int(live_node.building_id + 1)
for i in range(self.nb_of_dest_location):
tmp_tree_vec = live_node.tree_vec.copy() # should copy, not use ip address
if tmp_tree_vec[i] == -1: # and tmp_tree_vec.count(-1) > 1 # todo: change tree_vec to dict
tmp_tree_vec[i] = live_building_id
tree_node = TreeNode(live_building_id, i, tmp_tree_vec, live_node)
multiplier_mat = np.zeros([self.nb_of_dest_location, self.nb_of_dest_building])
tmp_value = 0
for k in range(self.nb_of_dest_building):
if tree_node.tree_vec[k] != -1:
l_ind = k
j_ind = tree_node.tree_vec[k]
multiplier_mat[l_ind, j_ind] = self.M
tmp_value += self.M
Branch_update(multiplier_mat, tmp_value)
lower_solution_1 = Hungarian_1(GLB_cost_mat + self.build_cost_orig_mat)
assignment_mat_1 = np.zeros([self.nb_of_orig_building, self.nb_of_orig_location])
assignment_mat_1[lower_solution_1['building_ind'], lower_solution_1['location_ind']] = 1
lower_solution_2 = Hungarian_2(self.build_cost_dest_mat - multiplier_mat)
assignment_mat_2 = np.zeros([self.nb_of_dest_location, self.nb_of_dest_building])
assignment_mat_2[lower_solution_2['location_ind'], lower_solution_2['building_ind']] = 1
tree_node.lower_value = lower_solution_1['value'] + lower_solution_2['value'] + tmp_value
tree_node.assignment_mat_1, tree_node.assignment_mat_2 = assignment_mat_1, assignment_mat_2
self.local_search(tree_node)
tree_nodes.append(tree_node)
return tree_nodes
def solveNodes(self, nodes):
child_node_list = []
lb, ub = np.inf, self.UB
best_node = None
for live_node in nodes:
if time.time() > self.valid_time_breadth: break
tree_nodes = self.solveNode(live_node)
for tree_node in tree_nodes:
if tree_node.upper_value < ub:
ub = tree_node.upper_value
best_node = tree_node
# as still two locations are not assigned, the solution is an lower bound solution
if tree_node.tree_vec.count(-1) > 1:
if tree_node.lower_value <= ub:
if tree_node.lower_value < lb: lb = tree_node.lower_value
child_node_list.append(tree_node)
return child_node_list, lb, ub, best_node
def createRoot(self):
tree_vec = [-1] * self.nb_of_dest_building
root = TreeNode(-1, -1, tree_vec, -1) # generate the root tree_node
root.lower_value = self.LB
root.upper_value = self.UB
return root
def checkStopCondition(self):
GAP = (self.UB - self.LB) / self.UB
print(f'**BNB-BF iter {self.branch_iter}: Best Lower bound = ', self.LB)
print(f'**BNB-BF iter {self.branch_iter}: Best Upper bound = ', self.UB)
print(f'**BNB-BF iter {self.branch_iter}: GAP = ', GAP)
self.bf_lower_bound_list.append(self.LB)
self.bf_upper_bound_list.append(self.UB)
if GAP <= self.target_relative_gap:
print('**BNB-BF target relative gap reached')
return True
if self.branch_iter >= self.max_branch_iters:
print('**BNB-BF max branch iters reached')
return True
if time.time() >= self.valid_time_breadth:
print('**BNB-BF time limit reached')
return True
def createRandomList(self):
for i in range(self.nb_of_orig_building):
for j in range(self.nb_of_orig_building):
if i != j:
self.random_i1_list.append(i)
self.random_i2_list.append(j)
self.nb_local = len(self.random_i1_list)
def solve_breadth(self, solver_status):
self.createRandomList()
if self.args['threads'] == -1:
cores = mp.cpu_count()
else:
cores = self.args['threads']
p = mp.Pool(processes=cores, initializer=init, initargs=(self.instance.branch_list,self.instance.GLB_cost_mat))
self.start_time_breadth = time.time()
self.valid_time_breadth = self.start_time_breadth + self.time_limit
root = self.createRoot()
task_list = [[root]] + [[] for _ in range(cores-1)]
number_of_nodes = 1
while True:
# new iter
self.branch_iter += 1
print(f'**BNB-BF iter {self.branch_iter}: nodes {number_of_nodes}')
self.tree_node_list.append(number_of_nodes)
# solve nodes
result_list = p.map(self.solveNodes, task_list)
# update lb and ub
result_with_new_lb = min(result_list, key=lambda x: x[1])
new_lb = result_with_new_lb[1]
if self.LB < new_lb < np.inf:
self.LB = new_lb
result_with_new_ub = min(result_list, key=lambda x: x[2])
new_ub = result_with_new_ub[2]
if new_ub < self.UB:
self.UB = new_ub
self.best_solution_1 = result_with_new_ub[3].assignment_mat_1
self.best_solution_2 = result_with_new_ub[3].assignment_mat_2
stop_flag = self.checkStopCondition()
if stop_flag: break
# update task_list
all_node_list = []
for result in result_list:
for node in result[0]:
if node.lower_value < self.UB:
all_node_list.append(node)
number_of_nodes = len(all_node_list)
if number_of_nodes == 0:
print('**BNB-BF branch and bound complete')
solver_status.value = 1
break
ave_load = int(np.ceil(number_of_nodes / cores))
            task_list = []
            for i in range(cores - 1):
                task_list.append(all_node_list[i * ave_load:(i + 1) * ave_load])
            task_list.append(all_node_list[(cores - 1) * ave_load:])
# time
t1 = time.time()
print(f'**BNB-BF iter {self.branch_iter}: elapsed time {t1 - self.start_time_breadth}')
print(f'**BNB-BF best solution1 {self.best_solution_1}, best solution2 {self.best_solution_2}')
solution_1_df= | pd.DataFrame(self.best_solution_1) | pandas.DataFrame |
# Front matter
import datetime
import pandas as pd
import numpy as np
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
import time
# Seaborn, useful for graphics
import seaborn as sns
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
rc = {'lines.linewidth': 1,
'axes.labelsize': 18,
'axes.titlesize': 18,
'legend.fontsize': 22,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
# Functions
###########
def Ks_from_vphi_rho(vphi,rho):
return vphi**2*rho
def vphi_from_vp_vs(vp,vs):
return np.sqrt(vp**2-(4/3)*vs**2)
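# Quick illustrative check (values roughly representative of the inner core;
# vp/vs in km/s and rho in g/cm^3 so that Ks comes out in GPa):
#   vphi_from_vp_vs(11.0, 3.6)    -> ~10.18 km/s
#   Ks_from_vphi_rho(10.18, 12.5) -> ~1296 GPa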
# Define input data
###################
studylist = []
labelchoice = dict()
EOSpath = dict()
colorchoice = dict()
linestylechoice = dict()
# hcp Fe Dewaele 2006 re-analyzed
study = 'hcpFe_Refit'
studylist.append(study)
labelchoice[study] = r'$\gamma$ from Murphy et al. 2011a'
EOSpath[study] = 'MINUTI/hcpFe_Dewaele_Refit_HighT'
colorchoice[study] = 'gray'
linestylechoice[study] = '-'
# hcp Fe Dewaele 2006
study = 'hcpFe_Dewaele'
studylist.append(study)
labelchoice[study] = r'$\gamma$ from this study'
EOSpath[study] = 'MINUTI/hcpFe_Dewaele_HighT'
colorchoice[study] = 'black'
linestylechoice[study] = '--'
# hcp Fe Fei 2016
study = 'hcpFe_Fei'
studylist.append(study)
labelchoice[study] = r'$\gamma$ from Fei et al. 2016'
EOSpath[study] = 'MINUTI/hcpFe_Fei_HighT'
colorchoice[study] = 'green'
linestylechoice[study] = '-.'
# Import data
#############
EOS_dfdict = dict()
temperatures = [5500]
for study in studylist:
print('Now importing '+study)
for T in temperatures:
density_path = EOSpath[study]+'/'+str(T)+'K_q_rep/'+study+'_dns.dat'
        rho_df = pd.read_csv(density_path, header=None, sep=r'\s+', comment='#', engine='python')
rho_df.columns = ['P','rho']
vphi_path = EOSpath[study]+'/'+str(T)+'K_q_rep/'+study+'_smv.dat'
        vphi_df = pd.read_csv(vphi_path, header=None, sep=r'\s+', comment='#', engine='python')
vphi_df.columns = ['P','vphi']
EOS_df = pd.merge(rho_df,vphi_df,on='P')
EOS_df['Ks'] = Ks_from_vphi_rho(EOS_df['vphi'],EOS_df['rho'])
EOS_dfdict[(study,T)] = EOS_df
# Load Seismic Observations
###########################
# Load Earth values and do some basic calcs so we're working with complete data sets
PREM_innercore = | pd.read_csv('../../FeAlloyEOS/PREM/PREM_innercore.csv') | pandas.read_csv |
import os
import tensorflow as tf
import numpy as np
import re
import pickle
import time
import pandas as pd
from pathlib import Path
import utils.gen_utils as utils
import utils.dataset_processors as dataset_processors
import utils.linguistic_features_utils as feature_utils
from sklearn.model_selection import StratifiedKFold
from keras.models import model_from_json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#http://zachmoshe.com/2017/04/03/pickling-keras-models.html
def get_inputs(inp_dir, dataset, embed, embed_mode, mode, layer):
""" Read data from pkl file and prepare for training. """
file = open(inp_dir + dataset + '-' + embed + '-' + embed_mode + '-' + mode + '.pkl', 'rb')
data = pickle.load(file)
orders, data_x, data_y = list(zip(*data))
file.close()
# alphaW is responsible for which BERT layer embedding we will be using
if (layer == 'all'):
alphaW = np.full([n_hl], 1 / n_hl)
else:
alphaW = np.zeros([n_hl])
alphaW[int(layer) - 1] = 1
# just changing the way data is stored (tuples of minibatches) and getting the output for the required layer of BERT using alphaW
inputs = []
targets = []
author_ids = []
n_batches = len(data_y)
print(len(orders))
for ii in range(n_batches):
inputs.extend(np.einsum('k,kij->ij', alphaW, data_x[ii]))
targets.extend(data_y[ii])
author_ids.extend(orders[ii])
print('inputs shape: ', np.array(inputs).shape)
print('author_ids shape: ', np.array(author_ids).shape)
inputs = pd.DataFrame(np.array(inputs))
inputs['order'] = author_ids
inputs = inputs.set_index(['order'])
full_targets = pd.DataFrame(np.array(targets))
full_targets['order'] = author_ids
full_targets = full_targets.set_index(['order'])
if dataset == 'essays':
dump_data = dataset_processors.load_essays_df('../data/essays/essays.csv')
trait_labels = ['EXT', 'NEU', 'AGR', 'CON', 'OPN']
elif dataset == 'kaggle':
dump_data = dataset_processors.load_Kaggle_df('../data/kaggle/kaggle.csv')
trait_labels = ['E', 'N', 'F', 'J']
_, _, _, other_features_df = feature_utils.get_psycholinguist_data(dump_data, dataset, feature_flags)
inputs, full_targets = merge_features(inputs, other_features_df, trait_labels)
return inputs, full_targets, trait_labels
def merge_features(embedding, other_features, trait_labels):
""" Merge BERT and Psychologic features. """
if dataset == 'essays':
orders = | pd.read_csv('../data/essays/author_id_order.csv') | pandas.read_csv |
"""
Extracts images from Open Images (downloaded with download_open_images.py) into "food" and "not_food" categories;
the original training, evaluation and test splits are ignored.
Original data downloaded here: https://voxel51.com/docs/fiftyone/tutorials/open_images.html
"""
import pandas as pd
import os
import argparse
import pathlib
import random
from shutil import copy2
parser = argparse.ArgumentParser()
parser.add_argument(
"-t",
"--targ_dir",
default="../data/open_images",
help="target directory where downloaded images are",
)
parser.add_argument(
"-d",
"--dest_dir",
default="../data/open_images_extracted",
help="destination directory to extract images from targ_dir to",
)
args = parser.parse_args()
targ_dir = args.targ_dir
assert os.path.exists(targ_dir), "Target directory does not exist, please create it"
# Read in class names and turn to list and create classes and label dict
print(f"[INFO] Getting class names of Open Images data...")
targ_classes_path = os.path.join(targ_dir, "train", "metadata", "classes.csv")
classes_df = pd.read_csv(targ_classes_path, names=["id", "class"])
class_list = classes_df["class"].tolist()
class_label_dict = dict(zip(classes_df["id"], classes_df["class"]))
# Read in food names list (from NLTK)
print(f"[INFO] Getting list of foods...")
with open("../data/food_list.txt", "r") as f:
food_list = f.read().splitlines()
# Filter Open Images class list for food classes
print(f"[INFO] Filtering list of Open Images classes to find food classes...")
open_images_food_list = []
for class_name in class_list:
if class_name.lower() in food_list:
open_images_food_list.append(class_name)
print(
f"[INFO] Found some foods, here's 10 random picks:"
f"\t{random.sample(open_images_food_list, 10)}"
)
# Add column to classes_df saying whether the class is food or not
classes_df["is_food"] = classes_df["class"].isin(open_images_food_list)
# Get all image paths from targ_dir (this directory contains all downloaded Open Images)
image_path_list = list(pathlib.Path(targ_dir).glob("*/*/*.jpg"))
# Turn all image paths into a list of their IDs
image_ids = [image_path.name.split(".")[0] for image_path in image_path_list]
# Read in all label files from Open Images (train, val, test)
labels_list = list(pathlib.Path(targ_dir).glob("*/labels/classifications.csv"))
# Turn all labels into a single DataFrame (so it can be manipulated)
print(
f"[INFO] Importing Open Images label files (train, val, test) and combining them..."
)
labels_df_list = []
for labels in labels_list:
df = | pd.read_csv(labels) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
This module is a work in progress, as such concepts are subject to change.
MAIN IDEA:
`MultiTaskSamples` serves as a structure to contain and manipulate a set of
samples with potentially many different types of labels and features.
"""
import logging
import utool as ut
import ubelt as ub
import numpy as np
from wbia import dtool as dt
import pandas as pd
import sklearn
import sklearn.metrics
import sklearn.ensemble
import sklearn.impute
import sklearn.pipeline
import sklearn.neural_network
from wbia.algo.verif import sklearn_utils
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class XValConfig(dt.Config):
_param_info_list = [
# ut.ParamInfo('type', 'StratifiedKFold'),
ut.ParamInfo('type', 'StratifiedGroupKFold'),
ut.ParamInfo('n_splits', 3),
ut.ParamInfo(
'shuffle', True, hideif=lambda cfg: cfg['type'] == 'StratifiedGroupKFold'
),
ut.ParamInfo(
'random_state',
3953056901,
hideif=lambda cfg: cfg['type'] == 'StratifiedGroupKFold',
),
]
@ut.reloadable_class
class ClfProblem(ut.NiceRepr):
def __init__(pblm):
pblm.deploy_task_clfs = None
pblm.eval_task_clfs = None
pblm.xval_kw = XValConfig()
pblm.eval_task_clfs = None
pblm.task_combo_res = None
pblm.verbose = True
def set_pandas_options(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def set_pandas_options_low(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 5
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def set_pandas_options_normal(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def learn_evaluation_classifiers(pblm, task_keys=None, clf_keys=None, data_keys=None):
"""
Evaluates by learning classifiers using cross validation.
Do not use this to learn production classifiers.
        CommandLine:
            python -m wbia.algo.verif.vsone evaluate_classifiers --db PZ_PB_RF_TRAIN --show
            python -m clf_helpers learn_evaluation_classifiers
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> pblm = IrisProblem()
>>> pblm.setup()
>>> pblm.verbose = True
>>> pblm.eval_clf_keys = ['Logit', 'RF']
>>> pblm.eval_task_keys = ['iris']
>>> pblm.eval_data_keys = ['learn(all)']
>>> result = pblm.learn_evaluation_classifiers()
>>> res = pblm.task_combo_res['iris']['Logit']['learn(all)']
>>> res.print_report()
>>> res = pblm.task_combo_res['iris']['RF']['learn(all)']
>>> res.print_report()
>>> print(result)
"""
pblm.eval_task_clfs = ut.AutoVivification()
pblm.task_combo_res = ut.AutoVivification()
if task_keys is None:
task_keys = pblm.eval_task_keys
if data_keys is None:
data_keys = pblm.eval_data_keys
if clf_keys is None:
clf_keys = pblm.eval_clf_keys
if task_keys is None:
task_keys = [pblm.primary_task_key]
if data_keys is None:
data_keys = [pblm.default_data_key]
if clf_keys is None:
clf_keys = [pblm.default_clf_key]
if pblm.verbose:
ut.cprint('[pblm] learn_evaluation_classifiers', color='blue')
ut.cprint('[pblm] task_keys = {}'.format(task_keys))
ut.cprint('[pblm] data_keys = {}'.format(data_keys))
ut.cprint('[pblm] clf_keys = {}'.format(clf_keys))
Prog = ut.ProgPartial(freq=1, adjust=False, prehack='%s')
task_prog = Prog(task_keys, label='Task')
for task_key in task_prog:
dataset_prog = Prog(data_keys, label='Data')
for data_key in dataset_prog:
clf_prog = Prog(clf_keys, label='CLF')
for clf_key in clf_prog:
pblm._ensure_evaluation_clf(task_key, data_key, clf_key)
def _ensure_evaluation_clf(pblm, task_key, data_key, clf_key, use_cache=True):
"""
Learns and caches an evaluation (cross-validated) classifier and tests
and caches the results.
data_key = 'learn(sum,glob)'
clf_key = 'RF'
"""
# TODO: add in params used to construct features into the cfgstr
if hasattr(pblm.samples, 'sample_hashid'):
ibs = pblm.infr.ibs
sample_hashid = pblm.samples.sample_hashid()
feat_dims = pblm.samples.X_dict[data_key].columns.values.tolist()
# cfg_prefix = sample_hashid + pblm.qreq_.get_cfgstr() + feat_cfgstr
est_kw1, est_kw2 = pblm._estimator_params(clf_key)
param_id = ut.get_dict_hashid(est_kw1)
xval_id = pblm.xval_kw.get_cfgstr()
cfgstr = '_'.join(
[
sample_hashid,
param_id,
xval_id,
task_key,
data_key,
clf_key,
ut.hashid_arr(feat_dims, 'feats'),
]
)
fname = 'eval_clfres_' + ibs.dbname
else:
fname = 'foo'
feat_dims = None
cfgstr = 'bar'
use_cache = False
# TODO: ABI class should not be caching
cacher_kw = dict(appname='vsone_rf_train', enabled=use_cache, verbose=1)
cacher_clf = ub.Cacher(fname, cfgstr=cfgstr, meta=[feat_dims], **cacher_kw)
data = cacher_clf.tryload()
if not data:
data = pblm._train_evaluation_clf(task_key, data_key, clf_key)
cacher_clf.save(data)
clf_list, res_list = data
labels = pblm.samples.subtasks[task_key]
combo_res = ClfResult.combine_results(res_list, labels)
pblm.eval_task_clfs[task_key][clf_key][data_key] = clf_list
pblm.task_combo_res[task_key][clf_key][data_key] = combo_res
def _train_evaluation_clf(pblm, task_key, data_key, clf_key, feat_dims=None):
"""
Learns a cross-validated classifier on the dataset
Ignore:
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem()
>>> pblm.load_features()
>>> pblm.load_samples()
>>> data_key = 'learn(all)'
>>> task_key = 'photobomb_state'
>>> clf_key = 'RF-OVR'
>>> task_key = 'match_state'
>>> data_key = pblm.default_data_key
>>> clf_key = pblm.default_clf_key
"""
X_df = pblm.samples.X_dict[data_key]
labels = pblm.samples.subtasks[task_key]
assert np.all(labels.encoded_df.index == X_df.index)
clf_partial = pblm._get_estimator(clf_key)
xval_kw = pblm.xval_kw.asdict()
clf_list = []
res_list = []
skf_list = pblm.samples.stratified_kfold_indices(**xval_kw)
skf_prog = ut.ProgIter(skf_list, label='skf-train-eval')
for train_idx, test_idx in skf_prog:
X_df_train = X_df.iloc[train_idx]
assert X_df_train.index.tolist() == ut.take(pblm.samples.index, train_idx)
# train_uv = X_df.iloc[train_idx].index
# X_train = X_df.loc[train_uv]
# y_train = labels.encoded_df.loc[train_uv]
if feat_dims is not None:
X_df_train = X_df_train[feat_dims]
X_train = X_df_train.values
y_train = labels.encoded_df.iloc[train_idx].values.ravel()
clf = clf_partial()
clf.fit(X_train, y_train)
# Note: There is a corner case where one fold doesn't get any
# labels of a certain class. Because y_train is an encoded integer,
# the clf.classes_ attribute will cause predictions to agree with
# other classifiers trained on the same labels.
# Evaluate results
res = ClfResult.make_single(
clf, X_df, test_idx, labels, data_key, feat_dims=feat_dims
)
clf_list.append(clf)
res_list.append(res)
return clf_list, res_list
def _external_classifier_result(
pblm, clf, task_key, data_key, feat_dims=None, test_idx=None
):
"""
Given an external classifier (ensure its trained on disjoint data)
evaluate all data on it.
Args:
test_idx (list): subset of this classifier to test on
(defaults to all if None)
"""
X_df = pblm.samples.X_dict[data_key]
if test_idx is None:
test_idx = np.arange(len(X_df))
labels = pblm.samples.subtasks[task_key]
res = ClfResult.make_single(
clf, X_df, test_idx, labels, data_key, feat_dims=feat_dims
)
return res
def learn_deploy_classifiers(pblm, task_keys=None, clf_key=None, data_key=None):
"""
Learns on data without any train/validation split
"""
if pblm.verbose > 0:
ut.cprint('[pblm] learn_deploy_classifiers', color='blue')
if clf_key is None:
clf_key = pblm.default_clf_key
if data_key is None:
data_key = pblm.default_data_key
if task_keys is None:
task_keys = list(pblm.samples.supported_tasks())
if pblm.deploy_task_clfs is None:
pblm.deploy_task_clfs = ut.AutoVivification()
Prog = ut.ProgPartial(freq=1, adjust=False, prehack='%s')
task_prog = Prog(task_keys, label='Task')
task_clfs = {}
for task_key in task_prog:
clf = pblm._train_deploy_clf(task_key, data_key, clf_key)
task_clfs[task_key] = clf
pblm.deploy_task_clfs[task_key][clf_key][data_key] = clf
return task_clfs
def _estimator_params(pblm, clf_key):
est_type = clf_key.split('-')[0]
if est_type in {'RF', 'RandomForest'}:
est_kw1 = {
# 'max_depth': 4,
'bootstrap': True,
'class_weight': None,
'criterion': 'entropy',
'max_features': 'sqrt',
# 'max_features': None,
'min_samples_leaf': 5,
'min_samples_split': 2,
# 'n_estimators': 64,
'n_estimators': 256,
}
# Hack to only use missing values if we have the right sklearn
if 'missing_values' in ut.get_func_kwargs(
sklearn.ensemble.RandomForestClassifier.__init__
):
est_kw1['missing_values'] = np.nan
est_kw2 = {
'random_state': 3915904814,
'verbose': 0,
'n_jobs': -1,
}
elif est_type in {'SVC', 'SVM'}:
est_kw1 = dict(kernel='linear')
est_kw2 = {}
elif est_type in {'Logit', 'LogisticRegression'}:
est_kw1 = {}
est_kw2 = {}
elif est_type in {'MLP'}:
est_kw1 = dict(
activation='relu',
alpha=1e-05,
batch_size='auto',
beta_1=0.9,
beta_2=0.999,
early_stopping=False,
epsilon=1e-08,
hidden_layer_sizes=(10, 10),
learning_rate='constant',
learning_rate_init=0.001,
max_iter=200,
momentum=0.9,
nesterovs_momentum=True,
power_t=0.5,
random_state=3915904814,
shuffle=True,
solver='lbfgs',
tol=0.0001,
validation_fraction=0.1,
warm_start=False,
)
est_kw2 = dict(verbose=False)
else:
raise KeyError('Unknown Estimator')
return est_kw1, est_kw2
def _get_estimator(pblm, clf_key):
"""
Returns sklearn classifier
"""
tup = clf_key.split('-')
wrap_type = None if len(tup) == 1 else tup[1]
est_type = tup[0]
multiclass_wrapper = {
None: ut.identity,
'OVR': sklearn.multiclass.OneVsRestClassifier,
'OVO': sklearn.multiclass.OneVsOneClassifier,
}[wrap_type]
est_class = {
'RF': sklearn.ensemble.RandomForestClassifier,
'SVC': sklearn.svm.SVC,
'Logit': sklearn.linear_model.LogisticRegression,
'MLP': sklearn.neural_network.MLPClassifier,
}[est_type]
est_kw1, est_kw2 = pblm._estimator_params(est_type)
est_params = ut.merge_dicts(est_kw1, est_kw2)
# steps = []
# steps.append((est_type, est_class(**est_params)))
# if wrap_type is not None:
# steps.append((wrap_type, multiclass_wrapper))
if est_type == 'MLP':
def clf_partial():
pipe = sklearn.pipeline.Pipeline(
[
('inputer', sklearn.impute.SimpleImputer(strategy='mean')),
# ('scale', sklearn.preprocessing.StandardScaler),
('est', est_class(**est_params)),
]
)
return multiclass_wrapper(pipe)
elif est_type == 'Logit':
def clf_partial():
pipe = sklearn.pipeline.Pipeline(
[
('inputer', sklearn.impute.SimpleImputer(strategy='mean')),
('est', est_class(**est_params)),
]
)
return multiclass_wrapper(pipe)
else:
def clf_partial():
return multiclass_wrapper(est_class(**est_params))
return clf_partial
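    # Illustrative usage: `_get_estimator` returns a zero-argument factory, so
    # callers construct a fresh (optionally OVR/OVO-wrapped) classifier per
    # fold, e.g.
    #   clf = pblm._get_estimator('RF-OVR')()
    #   clf.fit(X_train, y_train)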
def _train_deploy_clf(pblm, task_key, data_key, clf_key):
X_df = pblm.samples.X_dict[data_key]
labels = pblm.samples.subtasks[task_key]
assert np.all(labels.encoded_df.index == X_df.index)
clf_partial = pblm._get_estimator(clf_key)
logger.info(
'Training deployment {} classifier on {} for {}'.format(
clf_key, data_key, task_key
)
)
clf = clf_partial()
index = X_df.index
X = X_df.loc[index].values
y = labels.encoded_df.loc[index].values.ravel()
clf.fit(X, y)
return clf
def _optimize_rf_hyperparams(pblm, data_key=None, task_key=None):
"""
helper script I've only run interactively
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem.from_empty('PZ_PB_RF_TRAIN')
#>>> pblm = OneVsOneProblem.from_empty('GZ_Master1')
>>> pblm.load_samples()
>>> pblm.load_features()
>>> pblm.build_feature_subsets()
>>> data_key=None
>>> task_key=None
"""
from sklearn.model_selection import RandomizedSearchCV # NOQA
from sklearn.model_selection import GridSearchCV # NOQA
from sklearn.ensemble import RandomForestClassifier
from wbia.algo.verif import sklearn_utils
if data_key is None:
data_key = pblm.default_data_key
if task_key is None:
task_key = pblm.primary_task_key
# Load data
X = pblm.samples.X_dict[data_key].values
y = pblm.samples.subtasks[task_key].y_enc
groups = pblm.samples.group_ids
# Define estimator and parameter search space
grid = {
'bootstrap': [True, False],
'class_weight': [None, 'balanced'],
'criterion': ['entropy', 'gini'],
# 'max_features': ['sqrt', 'log2'],
'max_features': ['sqrt'],
'min_samples_leaf': list(range(2, 11)),
'min_samples_split': list(range(2, 11)),
'n_estimators': [8, 64, 128, 256, 512, 1024],
}
est = RandomForestClassifier(missing_values=np.nan)
if False:
# debug
params = ut.util_dict.all_dict_combinations(grid)[0]
est.set_params(verbose=10, n_jobs=1, **params)
est.fit(X=X, y=y)
cv = sklearn_utils.StratifiedGroupKFold(n_splits=3)
if True:
n_iter = 25
SearchCV = ut.partial(RandomizedSearchCV, n_iter=n_iter)
else:
n_iter = ut.prod(map(len, grid.values()))
SearchCV = GridSearchCV
search = SearchCV(est, grid, cv=cv, verbose=10)
n_cpus = ut.num_cpus()
thresh = n_cpus * 1.5
n_jobs_est = 1
n_jobs_ser = min(n_cpus, n_iter)
if n_iter < thresh:
n_jobs_est = int(max(1, thresh / n_iter))
est.set_params(n_jobs=n_jobs_est)
search.set_params(n_jobs=n_jobs_ser)
search.fit(X=X, y=y, groups=groups)
res = search.cv_results_.copy()
alias = ut.odict(
[
('rank_test_score', 'rank'),
('mean_test_score', 'μ-test'),
('std_test_score', 'σ-test'),
('mean_train_score', 'μ-train'),
('std_train_score', 'σ-train'),
('mean_fit_time', 'fit_time'),
('params', 'params'),
]
)
res = ut.dict_subset(res, alias.keys())
cvresult_df = pd.DataFrame(res).rename(columns=alias)
cvresult_df = cvresult_df.sort_values('rank').reset_index(drop=True)
params = pd.DataFrame.from_dict(cvresult_df['params'].values.tolist())
logger.info('Varied params:')
logger.info(ut.repr4(ut.map_vals(set, params.to_dict('list'))))
logger.info('Ranked Params')
logger.info(params)
logger.info('Ranked scores on development set:')
logger.info(cvresult_df)
logger.info('Best parameters set found on hyperparam set:')
logger.info('best_params_ = %s' % (ut.repr4(search.best_params_),))
logger.info('Fastest params')
cvresult_df.loc[cvresult_df['fit_time'].idxmin()]['params']
def _dev_calib(pblm):
"""
interactive script only
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import calibration_curve
from sklearn.metrics import log_loss, brier_score_loss
# Load data
data_key = pblm.default_data_key
task_key = pblm.primary_task_key
X = pblm.samples.X_dict[data_key].values
y = pblm.samples.subtasks[task_key].y_enc
groups = pblm.samples.group_ids
# Split into test/train/valid
cv = sklearn_utils.StratifiedGroupKFold(n_splits=2)
test_idx, train_idx = next(cv.split(X, y, groups))
# valid_idx = train_idx[0::2]
# train_idx = train_idx[1::2]
# train_valid_idx = np.hstack([train_idx, valid_idx])
# Train Uncalibrated RF
est_kw = pblm._estimator_params('RF')[0]
uncal_clf = RandomForestClassifier(**est_kw)
uncal_clf.fit(X[train_idx], y[train_idx])
uncal_probs = uncal_clf.predict_proba(X[test_idx]).T[1]
uncal_score = log_loss(y[test_idx] == 1, uncal_probs)
uncal_brier = brier_score_loss(y[test_idx] == 1, uncal_probs)
# Train Calibrated RF
method = 'isotonic' if len(test_idx) > 2000 else 'sigmoid'
precal_clf = RandomForestClassifier(**est_kw)
# cv = sklearn_utils.StratifiedGroupKFold(n_splits=3)
cal_clf = CalibratedClassifierCV(precal_clf, cv=2, method=method)
cal_clf.fit(X[train_idx], y[train_idx])
cal_probs = cal_clf.predict_proba(X[test_idx]).T[1]
cal_score = log_loss(y[test_idx] == 1, cal_probs)
cal_brier = brier_score_loss(y[test_idx] == 1, cal_probs)
logger.info('cal_brier = %r' % (cal_brier,))
logger.info('uncal_brier = %r' % (uncal_brier,))
logger.info('uncal_score = %r' % (uncal_score,))
logger.info('cal_score = %r' % (cal_score,))
import wbia.plottool as pt
ut.qtensure()
pt.figure()
ax = pt.gca()
y_test = y[test_idx] == 1
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, uncal_probs, n_bins=10
)
ax.plot([0, 1], [0, 1], 'k:', label='Perfectly calibrated')
ax.plot(
mean_predicted_value,
fraction_of_positives,
's-',
label='%s (%1.3f)' % ('uncal-RF', uncal_brier),
)
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, cal_probs, n_bins=10
)
ax.plot(
mean_predicted_value,
fraction_of_positives,
's-',
label='%s (%1.3f)' % ('cal-RF', cal_brier),
)
pt.legend()
@ut.reloadable_class
class ClfResult(ut.NiceRepr):
r"""
Handles evaluation statistics for a multiclass classifier trained on a
specific dataset with specific labels.
"""
# Attributes that identify the task and data the classifier is evaluated on
_key_attrs = ['task_key', 'data_key', 'class_names']
# Attributes about results and labels of individual samples
_datafame_attrs = ['probs_df', 'probhats_df', 'target_bin_df', 'target_enc_df']
def __init__(res):
pass
def __nice__(res):
return '{}, {}, {}'.format(res.task_key, res.data_key, len(res.index))
@property
def index(res):
return res.probs_df.index
@classmethod
def make_single(ClfResult, clf, X_df, test_idx, labels, data_key, feat_dims=None):
"""
Make a result for a single cross validiation subset
"""
X_df_test = X_df.iloc[test_idx]
if feat_dims is not None:
X_df_test = X_df_test[feat_dims]
index = X_df_test.index
# clf_probs = clf.predict_proba(X_df_test)
# index = pd.Series(test_idx, name='test_idx')
# Ensure shape corresponds with all classes
def align_cols(arr, arr_cols, target_cols):
import utool as ut
alignx = ut.list_alignment(arr_cols, target_cols, missing=True)
aligned_arrT = ut.none_take(arr.T, alignx)
aligned_arrT = ut.replace_nones(aligned_arrT, np.zeros(len(arr)))
aligned_arr = np.vstack(aligned_arrT).T
return aligned_arr
res = ClfResult()
res.task_key = labels.task_name
res.data_key = data_key
res.class_names = ut.lmap(str, labels.class_names)
res.feat_dims = feat_dims
res.probs_df = sklearn_utils.predict_proba_df(clf, X_df_test, res.class_names)
res.target_bin_df = labels.indicator_df.iloc[test_idx]
res.target_enc_df = labels.encoded_df.iloc[test_idx]
if hasattr(clf, 'estimators_') and labels.n_classes > 2:
# The n-th estimator in the OVR classifier predicts the prob of the
# n-th class (as label 1).
probs_hat = np.hstack(
[est.predict_proba(X_df_test)[:, 1:2] for est in clf.estimators_]
)
res.probhats_df = pd.DataFrame(
align_cols(probs_hat, clf.classes_, labels.classes_),
index=index,
columns=res.class_names,
)
# In the OVR-case, ideally things will sum to 1, but when they
# don't normalization happens. An Z-value of more than 1 means
# overconfidence, and under 0 means underconfidence.
res.confidence_ratio = res.probhats_df.sum(axis=1)
else:
res.probhats_df = None
return res
def compress(res, flags):
res2 = ClfResult()
res2.task_key = res.task_key
res2.data_key = res.data_key
res2.class_names = res.class_names
res2.probs_df = res.probs_df[flags]
res2.target_bin_df = res.target_bin_df[flags]
res2.target_enc_df = res.target_enc_df[flags]
if res.probhats_df is None:
res2.probhats_df = None
else:
res2.probhats_df = res.probhats_df[flags]
# res2.confidence_ratio = res.confidence_ratio[flags]
return res2
@classmethod
def combine_results(ClfResult, res_list, labels=None):
"""
Combine results from cross validation runs into a single result
representing the performance of the entire dataset
"""
# Ensure that res_lists are not overlapping
for r1, r2 in ut.combinations(res_list, 2):
assert (
len(r1.index.intersection(r2.index)) == 0
), 'ClfResult dataframes must be disjoint'
# sanity check
for r in res_list:
assert np.all(r.index == r.probs_df.index)
assert np.all(r.index == r.target_bin_df.index)
assert np.all(r.index == r.target_enc_df.index)
# Combine them with pandas
res = ClfResult()
res0 = res_list[0]
# Transfer single attributes (which should all be the same)
for attr in ClfResult._key_attrs:
val = getattr(res0, attr)
setattr(res, attr, val)
assert all(
[getattr(r, attr) == val for r in res_list]
), 'ClfResult with different key attributes are incompatible'
# Combine dataframe properties (which should all have disjoint indices)
for attr in ClfResult._datafame_attrs:
if getattr(res0, attr) is not None:
combo_attr = pd.concat([getattr(r, attr) for r in res_list])
setattr(res, attr, combo_attr)
else:
setattr(res, attr, None)
for attr in ClfResult._datafame_attrs:
val = getattr(res, attr)
if val is not None:
assert np.all(res.index == val.index), 'index got weird'
return res
def hardness_analysis(res, samples, infr=None, method='argmax'):
"""
samples = pblm.samples
# TODO MWE with sklearn data
# ClfResult.make_single(ClfResult, clf, X_df, test_idx, labels,
# data_key, feat_dims=None):
import sklearn.datasets
iris = sklearn.datasets.load_iris()
# TODO: make this setup simpler
pblm = ClfProblem()
task_key, clf_key, data_key = 'iris', 'RF', 'learn(all)'
X_df = pd.DataFrame(iris.data, columns=iris.feature_names)
samples = MultiTaskSamples(X_df.index)
samples.apply_indicators({'iris': {name: iris.target == idx
for idx, name in enumerate(iris.target_names)}})
samples.X_dict = {'learn(all)': X_df}
pblm.samples = samples
pblm.xval_kw['type'] = 'StratifiedKFold'
clf_list, res_list = pblm._train_evaluation_clf(
task_key, data_key, clf_key)
labels = pblm.samples.subtasks[task_key]
res = ClfResult.combine_results(res_list, labels)
res.get_thresholds('mcc', 'maximize')
predict_method = 'argmax'
"""
meta = {}
easiness = ut.ziptake(res.probs_df.values, res.target_enc_df.values)
# pred = sklearn_utils.predict_from_probs(res.probs_df, predict_method)
if method == 'max-mcc':
method = res.get_thresholds('mcc', 'maximize')
pred = sklearn_utils.predict_from_probs(res.probs_df, method, force=True)
meta['easiness'] = np.array(easiness).ravel()
meta['hardness'] = 1 - meta['easiness']
meta['aid1'] = res.probs_df.index.get_level_values(0)
meta['aid2'] = res.probs_df.index.get_level_values(1)
# meta['aid1'] = samples.aid_pairs.T[0].take(res.probs_df.index.values)
# meta['aid2'] = samples.aid_pairs.T[1].take(res.probs_df.index.values)
# meta['pred'] = res.probs_df.values.argmax(axis=1)
meta['pred'] = pred.values
meta['real'] = res.target_enc_df.values.ravel()
meta['failed'] = meta['pred'] != meta['real']
meta = pd.DataFrame(meta)
meta = meta.set_index(['aid1', 'aid2'], drop=False)
if infr is not None:
ibs = infr.ibs
edges = list(meta.index.tolist())
conf_dict = infr.get_edge_attrs(
'confidence',
edges,
on_missing='filter',
default=ibs.const.CONFIDENCE.CODE.UNKNOWN,
)
conf_df = pd.DataFrame.from_dict(conf_dict, orient='index')
conf_df = conf_df[0].map(ibs.const.CONFIDENCE.CODE_TO_INT)
meta = meta.assign(real_conf=conf_df)
            meta['real_conf'] = np.nan_to_num(meta['real_conf']).astype(int)
meta = meta.sort_values('hardness', ascending=False)
res.meta = meta
return res.meta
def missing_classes(res):
# Find classes that were never predicted
unique_predictions = np.unique(res.probs_df.values.argmax(axis=1))
n_classes = len(res.class_names)
missing_classes = ut.index_complement(unique_predictions, n_classes)
return missing_classes
def augment_if_needed(res):
"""
Adds in dummy values for missing classes
"""
missing_classes = res.missing_classes()
n_classes = len(res.class_names)
y_test_enc_aug = res.target_enc_df.values
y_test_bin_aug = res.target_bin_df.values
clf_probs_aug = res.probs_df.values
sample_weight = np.ones(len(y_test_enc_aug))
n_missing = len(missing_classes)
if res.probhats_df is not None:
clf_probhats_aug = res.probhats_df.values
else:
clf_probhats_aug = None
# Check if augmentation is necessary
if n_missing > 0:
missing_bin = np.zeros((n_missing, n_classes))
missing_bin[(np.arange(n_missing), missing_classes)] = 1.0
missing_enc = np.array(missing_classes)[:, None]
y_test_enc_aug = np.vstack([y_test_enc_aug, missing_enc])
y_test_bin_aug = np.vstack([y_test_bin_aug, missing_bin])
clf_probs_aug = np.vstack([clf_probs_aug, missing_bin])
# make sample weights where dummies have no weight
sample_weight = np.hstack([sample_weight, np.full(n_missing, 0)])
if res.probhats_df is not None:
clf_probhats_aug = np.vstack([clf_probhats_aug, missing_bin])
res.clf_probs = clf_probs_aug
res.clf_probhats = clf_probhats_aug
res.y_test_enc = y_test_enc_aug
res.y_test_bin = y_test_bin_aug
res.sample_weight = sample_weight
def extended_clf_report(res, verbose=True):
res.augment_if_needed()
pred_enc = res.clf_probs.argmax(axis=1)
y_pred = pred_enc
y_true = res.y_test_enc
sample_weight = res.sample_weight
target_names = res.class_names
report = sklearn_utils.classification_report2(
y_true,
y_pred,
target_names=target_names,
sample_weight=sample_weight,
verbose=verbose,
)
return report
def print_report(res):
res.augment_if_needed()
pred_enc = res.clf_probs.argmax(axis=1)
res.extended_clf_report()
report = sklearn.metrics.classification_report(
y_true=res.y_test_enc,
y_pred=pred_enc,
target_names=res.class_names,
sample_weight=res.sample_weight,
)
logger.info('Precision/Recall Report:')
logger.info(report)
def get_thresholds(res, metric='mcc', value='maximize'):
"""
get_metric = 'thresholds'
at_metric = metric = 'mcc'
at_value = value = 'maximize'
a = []
b = []
for x in np.linspace(0, 1, 1000):
a += [cfms.get_metric_at_metric('thresholds', 'fpr', x, subindex=True)]
b += [cfms.get_thresh_at_metric('fpr', x)]
a = np.array(a)
b = np.array(b)
d = (a - b)
logger.info((d.min(), d.max()))
"""
threshes = {}
for class_name in res.class_names:
cfms = res.confusions(class_name)
thresh = cfms.get_metric_at_metric('thresh', metric, value)
threshes[class_name] = thresh
return threshes
@profile
def get_pos_threshes(
res,
metric='fpr',
value=1e-4,
maximize=False,
warmup=200,
priors=None,
min_thresh=0.5,
):
"""
Finds a threshold that achieves the desired `value` for the desired
metric, while maximizing or minimizing the threshold.
For positive classification you want to minimize the threshold.
Priors can be passed in to augment probabilities depending on support.
By default a class prior is 1 for threshold minimization and 0 for
maximization.
"""
pos_threshes = {}
if priors is None:
priors = {name: float(not maximize) for name in res.class_names}
for class_name in res.class_names:
cfms = res.confusions(class_name)
learned_thresh = cfms.get_metric_at_metric('thresh', metric, value)
# learned_thresh = cfms.get_thresh_at_metric(
# metric, value, maximize=maximize)
prior_thresh = priors[class_name]
n_support = cfms.n_pos
if warmup is not None:
"""
python -m wbia.plottool.draw_func2 plot_func --show --range=0,1 \
--func="lambda x: np.maximum(0, (x - .6) / (1 - .6))"
"""
# If n_support < warmup: then interpolate to learned thresh
nmax = warmup if isinstance(warmup, int) else warmup[class_name]
# alpha varies from 0 to 1
alpha = min(nmax, n_support) / nmax
# transform alpha through nonlinear function (similar to ReLU)
p = 0.6 # transition point
alpha = max(0, (alpha - p) / (1 - p))
thresh = prior_thresh * (1 - alpha) + learned_thresh * (alpha)
else:
thresh = learned_thresh
pos_threshes[class_name] = max(min_thresh, thresh)
return pos_threshes
def report_thresholds(res, warmup=200):
# import vtool as vt
ut.cprint('Threshold Report', 'yellow')
y_test_bin = res.target_bin_df.values
# y_test_enc = y_test_bin.argmax(axis=1)
# clf_probs = res.probs_df.values
# The maximum allowed false positive rate
# We expect that we will make 1 error every 1,000 decisions
# thresh_df['foo'] = [1, 2, 3]
# thresh_df['foo'][res.class_names[k]] = 1
# for k in [2, 0, 1]:
choice_mv = ut.odict(
[
('@fpr=.01', ('fpr', 0.01)),
('@fpr=.001', ('fpr', 0.001)),
('@fpr=.0001', ('fpr', 1e-4)),
('@fpr=.0000', ('fpr', 0)),
('@max(mcc)', ('mcc', 'max')),
# (class_name + '@max(acc)', ('acc', 'max')),
# (class_name + '@max(mk)', ('mk', 'max')),
# (class_name + '@max(bm)', ('bm', 'max')),
]
)
for k in range(y_test_bin.shape[1]):
thresh_dict = ut.odict()
class_name = res.class_names[k]
cfms = res.confusions(class_name)
# probs, labels = clf_probs.T[k], y_test_bin.T[k]
# cfms = vt.ConfusionMetrics().fit(probs, labels)
for k, mv in choice_mv.items():
metric, value = mv
idx = cfms.get_index_at_metric(metric, value)
key = class_name + k
thresh_dict[key] = ut.odict()
for metric in ['thresh', 'fpr', 'tpr', 'tpa', 'bm', 'mk', 'mcc']:
thresh_dict[key][metric] = cfms.get_metric_at_index(metric, idx)
thresh_df = pd.DataFrame.from_dict(thresh_dict, orient='index')
thresh_df = thresh_df.loc[list(thresh_dict.keys())]
if cfms.n_pos > 0 and cfms.n_neg > 0:
logger.info('Raw 1vR {} Thresholds'.format(class_name))
logger.info(ut.indent(thresh_df.to_string(float_format='{:.4f}'.format)))
# chosen_type = class_name + '@fpr=0'
# pos_threshes[class_name] = thresh_df.loc[chosen_type]['thresh']
for choice_k, choice_mv in iter(choice_mv.items()):
metric, value = choice_mv
pos_threshes = res.get_pos_threshes(metric, value, warmup=warmup)
logger.info('Choosing threshold based on %s' % (choice_k,))
res.report_auto_thresholds(pos_threshes)
def report_auto_thresholds(res, threshes, verbose=True):
report_lines = []
print_ = report_lines.append
print_(
'Chosen thresholds = %s'
% (ut.repr2(threshes, nl=1, precision=4, align=True),)
)
res.augment_if_needed()
target_names = res.class_names
sample_weight = res.sample_weight
y_true = res.y_test_enc.ravel()
y_pred, can_autodecide = sklearn_utils.predict_from_probs(
res.clf_probs,
threshes,
res.class_names,
force=False,
multi=False,
return_flags=True,
)
can_autodecide[res.sample_weight == 0] = False
        auto_pred = y_pred[can_autodecide].astype(int)
auto_true = y_true[can_autodecide].ravel()
auto_probs = res.clf_probs[can_autodecide]
total_cases = int(sample_weight.sum())
print_('Will autodecide for %r/%r cases' % (can_autodecide.sum(), (total_cases)))
def frac_str(a, b):
return '{:}/{:} = {:.2f}%'.format(int(a), int(b), a / b)
y_test_bin = res.target_bin_df.values
supported_class_idxs = [k for k, y in enumerate(y_test_bin.T) if y.sum() > 0]
print_(' * Auto-Decide Per-Class Summary')
for k in supported_class_idxs:
# Look at fail/succs in threshold
name = res.class_names[k]
# number of times this class appears overall
n_total_k = (y_test_bin.T[k]).sum()
# get the cases where this class was predicted
auto_true_k = auto_true == k
auto_pred_k = auto_pred == k
# number of cases auto predicted
n_pred_k = auto_pred_k.sum()
# number of times auto was right
n_tp = (auto_true_k & auto_pred_k).sum()
# number of times auto was wrong
n_fp = (~auto_true_k & auto_pred_k).sum()
fail_str = frac_str(n_fp, n_pred_k)
pass_str = frac_str(n_tp, n_total_k)
fmtstr = '\n'.join(
[
'{name}:',
' {n_total_k} samples existed, and did {n_pred_k} auto predictions',
' got {pass_str} right',
' made {fail_str} errors',
]
)
print_(ut.indent(fmtstr.format(**locals())))
report = sklearn_utils.classification_report2(
y_true,
y_pred,
target_names=target_names,
            sample_weight=can_autodecide.astype(float),
verbose=False,
)
print_(' * Auto-Decide Confusion')
print_(ut.indent(str(report['confusion'])))
print_(' * Auto-Decide Metrics')
print_(ut.indent(str(report['metrics'])))
if 'mcc' in report:
print_(ut.indent(str(report['mcc'])))
try:
auto_truth_bin = res.y_test_bin[can_autodecide]
for k in supported_class_idxs:
auto_truth_k = auto_truth_bin.T[k]
auto_probs_k = auto_probs.T[k]
if auto_probs_k.sum():
auc = sklearn.metrics.roc_auc_score(auto_truth_k, auto_probs_k)
print_(
' * Auto AUC(Macro): {:.4f} for class={}'.format(
auc, res.class_names[k]
)
)
except ValueError:
pass
report = '\n'.join(report_lines)
if verbose:
logger.info(report)
return report
def confusions(res, class_name):
import vtool as vt
y_test_bin = res.target_bin_df.values
clf_probs = res.probs_df.values
k = res.class_names.index(class_name)
probs, labels = clf_probs.T[k], y_test_bin.T[k]
confusions = vt.ConfusionMetrics().fit(probs, labels)
return confusions
def ishow_roc(res):
import vtool as vt
import wbia.plottool as pt
ut.qtensure()
y_test_bin = res.target_bin_df.values
# The maximum allowed false positive rate
# We expect that we will make 1 error every 1,000 decisions
# thresh_df['foo'] = [1, 2, 3]
# thresh_df['foo'][res.class_names[k]] = 1
# for k in [2, 0, 1]:
for k in range(y_test_bin.shape[1]):
if y_test_bin.shape[1] == 2 and k == 0:
# only show one in the binary case
continue
class_name = res.class_names[k]
confusions = res.confusions(class_name)
ROCInteraction = vt.interact_roc_factory(
confusions, show_operating_point=True
)
fnum = pt.ensure_fnum(k)
# ROCInteraction.static_plot(fnum, None, name=class_name)
inter = ROCInteraction(fnum=fnum, pnum=None, name=class_name)
inter.start()
# if False:
# X = probs
# y = labels
# encoder = vt.ScoreNormalizer()
# encoder.fit(probs, labels)
# learn_thresh = encoder.learn_threshold2()
# encoder.inverse_normalize(learn_thresh)
# encoder.visualize(fnum=k)
pass
def show_roc(res, class_name, **kwargs):
import vtool as vt
labels = res.target_bin_df[class_name].values
probs = res.probs_df[class_name].values
confusions = vt.ConfusionMetrics().fit(probs, labels)
confusions.draw_roc_curve(**kwargs)
def roc_scores_ovr_hat(res):
res.augment_if_needed()
for k in range(len(res.class_names)):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.probhats_df.values.T[k]
auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
yield auc
def roc_scores_ovr(res):
res.augment_if_needed()
for k in range(res.y_test_bin.shape[1]):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.clf_probs.T[k]
auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
yield auc
def confusions_ovr(res):
# one_vs_rest confusions
import vtool as vt
res.augment_if_needed()
for k in range(res.y_test_bin.shape[1]):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.clf_probs.T[k]
cfms = vt.ConfusionMetrics().fit(class_k_probs, class_k_truth)
# auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
yield res.class_names[k], cfms
def roc_score(res):
res.augment_if_needed()
auc_learn = sklearn.metrics.roc_auc_score(res.y_test_bin, res.clf_probs)
return auc_learn
@ut.reloadable_class
class MultiTaskSamples(ut.NiceRepr):
"""
Handles samples (i.e. feature-label pairs) with a combination of
non-mutually exclusive subclassification labels
CommandLine:
python -m wbia.algo.verif.clf_helpers MultiTaskSamples
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> samples = MultiTaskSamples([0, 1, 2, 3])
>>> tasks_to_indicators = ut.odict([
>>> ('task1', ut.odict([
>>> ('state1', [0, 0, 0, 1]),
>>> ('state2', [0, 0, 1, 0]),
>>> ('state3', [1, 1, 0, 0]),
>>> ])),
>>> ('task2', ut.odict([
>>> ('state4', [0, 0, 0, 1]),
>>> ('state5', [1, 1, 1, 0]),
>>> ]))
>>> ])
>>> samples.apply_indicators(tasks_to_indicators)
"""
def __init__(samples, index):
samples.index = index
samples.subtasks = ut.odict()
# def set_simple_scores(samples, simple_scores):
# if simple_scores is not None:
# edges = ut.emap(tuple, samples.aid_pairs.tolist())
# assert (edges == simple_scores.index.tolist())
# samples.simple_scores = simple_scores
# def set_feats(samples, X_dict):
# if X_dict is not None:
# edges = ut.emap(tuple, samples.aid_pairs.tolist())
# for X in X_dict.values():
# assert np.all(edges == X.index.tolist())
# samples.X_dict = X_dict
def supported_tasks(samples):
for task_key, labels in samples.subtasks.items():
labels = samples.subtasks[task_key]
if labels.has_support():
yield task_key
def apply_indicators(samples, tasks_to_indicators):
"""
Adds labels for a specific task
Args:
tasks_to_indicators (dict): takes the form:
{
`my_task_name1' {
'class1': [list of bools indicating class membership]
...
'classN': [list of bools indicating class membership]
}
...
`my_task_nameN': ...
}
"""
n_samples = None
samples.n_tasks = len(tasks_to_indicators)
for task_name, indicator in tasks_to_indicators.items():
labels = MultiClassLabels.from_indicators(
indicator, task_name=task_name, index=samples.index
)
samples.subtasks[task_name] = labels
if n_samples is None:
n_samples = labels.n_samples
elif n_samples != labels.n_samples:
raise ValueError('numer of samples is different')
samples.n_samples = n_samples
def apply_encoded_labels(samples, y_enc, class_names, task_name):
"""
Adds labels for a specific task. Alternative to `apply_indicators`
Args:
y_enc (list): integer label indicating the class for each sample
class_names (list): list of strings indicating the class-domain
task_name (str): key for denoting this specific task
"""
# convert to indicator structure and use that
tasks_to_indicators = ut.odict(
[
(
task_name,
ut.odict(
[
(name, np.array(y_enc) == i)
for i, name in enumerate(class_names)
]
),
)
]
)
samples.apply_indicators(tasks_to_indicators)
# @ut.memoize
def encoded_2d(samples):
encoded_2d = pd.concat([v.encoded_df for k, v in samples.items()], axis=1)
return encoded_2d
def class_name_basis(samples):
"""corresponds with indexes returned from encoded1d"""
class_name_basis = [
t[::-1]
for t in ut.product(*[v.class_names for k, v in samples.items()][::-1])
]
# class_name_basis = [(b, a) for a, b in ut.product(*[
# v.class_names for k, v in samples.items()][::-1])]
return class_name_basis
def class_idx_basis_2d(samples):
"""2d-index version of class_name_basis"""
class_idx_basis_2d = [
(b, a)
for a, b in ut.product(
*[range(v.n_classes) for k, v in samples.items()][::-1]
)
]
return class_idx_basis_2d
def class_idx_basis_1d(samples):
"""1d-index version of class_name_basis"""
n_states = np.prod([v.n_classes for k, v in samples.items()])
        class_idx_basis_1d = np.arange(n_states, dtype=int)
return class_idx_basis_1d
# @ut.memoize
def encoded_1d(samples):
"""Returns a unique label for each combination of samples"""
# from sklearn.preprocessing import MultiLabelBinarizer
encoded_2d = samples.encoded_2d()
class_space = [v.n_classes for k, v in samples.items()]
offsets = np.array([1] + np.cumprod(class_space).tolist()[:-1])[None, :]
encoded_1d = (offsets * encoded_2d).sum(axis=1)
# e = MultiLabelBinarizer()
# bin_coeff = e.fit_transform(encoded_2d)
# bin_basis = (2 ** np.arange(bin_coeff.shape[1]))[None, :]
# # encoded_1d = (bin_coeff * bin_basis).sum(axis=1)
# encoded_1d = (bin_coeff * bin_basis[::-1]).sum(axis=1)
# # vt.unique_rows(sklearn.preprocessing.MultiLabelBinarizer().fit_transform(encoded_2d))
# [v.encoded_df.values for k, v in samples.items()]
# encoded_df_1d = pd.concat([v.encoded_df for k, v in samples.items()], axis=1)
return encoded_1d
def __nice__(samples):
return 'nS=%r, nT=%r' % (len(samples), samples.n_tasks)
def __getitem__(samples, task_key):
return samples.subtasks[task_key]
def __len__(samples):
return samples.n_samples
def print_info(samples):
for task_name, labels in samples.items():
labels.print_info()
logger.info('hist(all) = %s' % (ut.repr4(samples.make_histogram())))
logger.info('len(all) = %s' % (len(samples)))
def make_histogram(samples):
"""label histogram"""
class_name_basis = samples.class_name_basis()
class_idx_basis_1d = samples.class_idx_basis_1d()
# logger.info('class_idx_basis_1d = %r' % (class_idx_basis_1d,))
# logger.info(samples.encoded_1d())
multi_task_idx_hist = ut.dict_hist(
samples.encoded_1d().values, labels=class_idx_basis_1d
)
multi_task_hist = ut.map_keys(lambda k: class_name_basis[k], multi_task_idx_hist)
return multi_task_hist
def items(samples):
for task_name, labels in samples.subtasks.items():
yield task_name, labels
# def take(samples, idxs):
# mask = ut.index_to_boolmask(idxs, len(samples))
# return samples.compress(mask)
@property
def group_ids(samples):
return None
def stratified_kfold_indices(samples, **xval_kw):
"""
TODO: check xval label frequency
"""
from sklearn import model_selection
X = np.empty((len(samples), 0))
y = samples.encoded_1d().values
groups = samples.group_ids
type_ = xval_kw.pop('type', 'StratifiedGroupKFold')
if type_ == 'StratifiedGroupKFold':
assert groups is not None
# FIXME: The StratifiedGroupKFold could be implemented better.
splitter = sklearn_utils.StratifiedGroupKFold(**xval_kw)
skf_list = list(splitter.split(X=X, y=y, groups=groups))
elif type_ == 'StratifiedKFold':
splitter = model_selection.StratifiedKFold(**xval_kw)
skf_list = list(splitter.split(X=X, y=y))
return skf_list
def subsplit_indices(samples, subset_idx, **xval_kw):
"""split an existing set"""
from sklearn import model_selection
X = np.empty((len(subset_idx), 0))
y = samples.encoded_1d().values[subset_idx]
groups = samples.group_ids[subset_idx]
xval_kw_ = xval_kw.copy()
if 'n_splits' not in xval_kw_:
xval_kw_['n_splits'] = 3
type_ = xval_kw_.pop('type', 'StratifiedGroupKFold')
if type_ == 'StratifiedGroupKFold':
assert groups is not None
# FIXME: The StratifiedGroupKFold could be implemented better.
splitter = sklearn_utils.StratifiedGroupKFold(**xval_kw_)
rel_skf_list = list(splitter.split(X=X, y=y, groups=groups))
elif type_ == 'StratifiedKFold':
splitter = model_selection.StratifiedKFold(**xval_kw_)
rel_skf_list = list(splitter.split(X=X, y=y))
# map back into original coords
skf_list = [
(subset_idx[rel_idx1], subset_idx[rel_idx2])
for rel_idx1, rel_idx2 in rel_skf_list
]
for idx1, idx2 in skf_list:
assert len(np.intersect1d(subset_idx, idx1)) == len(idx1)
assert len(np.intersect1d(subset_idx, idx2)) == len(idx2)
# assert
return skf_list
@ut.reloadable_class
class MultiClassLabels(ut.NiceRepr):
"""
Used by samples to encode a single set of mutually exclusive labels. These
can either be binary or multiclass.
import pandas as pd
pd.options.display.max_rows = 10
# pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
"""
def __init__(labels):
# Helper Info
labels.task_name = None
labels.n_samples = None
labels.n_classes = None
labels.class_names = None
labels.classes_ = None
# Core data
labels.indicator_df = None
labels.encoded_df = None
labels.default_class = None
def has_support(labels):
return len(labels.make_histogram()) > 1
def lookup_class_idx(labels, class_name):
return ut.dzip(labels.class_names, labels.classes_)[class_name]
@classmethod
def from_indicators(MultiClassLabels, indicator, index=None, task_name=None):
labels = MultiClassLabels()
n_samples = len(next(iter(indicator.values())))
# if index is None:
# index = pd.Series(np.arange(n_samples), name='index')
indicator_df = | pd.DataFrame(indicator, index=index) | pandas.DataFrame |
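# Added illustration (not part of the row above): a minimal, hedged sketch of the
# mixed-radix encoding used by MultiTaskSamples.encoded_1d. Each task's encoded column
# is weighted by the cumulative product of the preceding tasks' class counts, so every
# combination of per-task labels maps to a unique integer.
import numpy as np
import pandas as pd
encoded_2d_example = pd.DataFrame({'task1': [0, 1, 2, 2], 'task2': [0, 1, 1, 0]})
class_space_example = [3, 2]  # n_classes per task, in column order
offsets_example = np.array([1] + np.cumprod(class_space_example).tolist()[:-1])[None, :]
encoded_1d_example = (offsets_example * encoded_2d_example).sum(axis=1)
# offsets_example == [[1, 3]]; encoded_1d_example.tolist() == [0, 4, 5, 2]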
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
pd.DataFrame({"a": [4.]}, index=pd.Int64Index([1]))]),
([pd.DataFrame(), pd.Series([None, 1., 2., 3.])],
[pd.DataFrame(), pd.Series([1., 2., 3.], index=pd.Int64Index([1, 2, 3]))]),
([pd.DataFrame({"a": [1., 2., None]}), pd.Series([])],
[pd.DataFrame({"a": [1., 2.]}), pd.Series([])])
])
def test_drop_nan(data, expected):
no_nan_1, no_nan_2 = drop_rows_with_nans(*data)
_check_equality(no_nan_1, expected[0], check_index_type=False)
_check_equality(no_nan_2, expected[1], check_index_type=False)
def test_rename_column_names_to_numeric():
X = np.array([[1, 2], [3, 4]])
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame(X))
X = pd.DataFrame({"<>": [1, 2], ">>": [2, 4]})
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame({0: [1, 2], 1: [2, 4]}))
X = ww.DataTable(pd.DataFrame({"<>": [1, 2], ">>": [2, 4]}), logical_types={"<>": "categorical", ">>": "categorical"})
X_renamed = _rename_column_names_to_numeric(X)
X_expected = pd.DataFrame({0: pd.Series([1, 2], dtype="category"), 1: pd.Series([2, 4], dtype="category")})
pd.testing.assert_frame_equal(X_renamed.to_dataframe(), X_expected)
assert X_renamed.logical_types == {0: ww.logical_types.Categorical, 1: ww.logical_types.Categorical}
def test_convert_woodwork_types_wrapper_with_nan():
y = _convert_woodwork_types_wrapper(pd.Series([1, 2, None], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, np.nan], dtype="float64"))
y = _convert_woodwork_types_wrapper(pd.array([1, 2, None], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, np.nan], dtype="float64"))
y = _convert_woodwork_types_wrapper(pd.Series(["a", "b", None], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", np.nan], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.array(["a", "b", None], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", np.nan], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.Series([True, False, None], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, np.nan]))
y = _convert_woodwork_types_wrapper(pd.array([True, False, None], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, np.nan]))
def test_convert_woodwork_types_wrapper():
y = _convert_woodwork_types_wrapper(pd.Series([1, 2, 3], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, 3], dtype="int64"))
y = _convert_woodwork_types_wrapper(pd.array([1, 2, 3], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, 3], dtype="int64"))
y = _convert_woodwork_types_wrapper(pd.Series(["a", "b", "a"], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", "a"], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.array(["a", "b", "a"], dtype="string"))
pd.testing.assert_series_equal(y, | pd.Series(["a", "b", "a"], dtype="object") | pandas.Series |
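# Added illustration (not part of the test file above): a small, hedged sketch of the
# wrap-around arithmetic that make_expected_values encodes in test_get_random_seed_int:
# values outside [min_bound, max_bound) are mapped back into that half-open interval
# with modular arithmetic.
def wrap_seed_example(value, min_bound, max_bound):
    # keep in-range values unchanged; wrap everything else into [min_bound, max_bound)
    if min_bound <= value < max_bound:
        return value
    return (value - min_bound) % (max_bound - min_bound) + min_bound
assert wrap_seed_example(7, 0, 5) == 2   # 7 wraps forward past the upper bound
assert wrap_seed_example(-1, 0, 5) == 4  # -1 wraps backward past the lower bound
assert wrap_seed_example(3, 0, 5) == 3   # in-range values are unchanged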
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
def load_data():
data = | pd.read_csv('data/data_for_ml_clin_only.csv') | pandas.read_csv |
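# Hedged sketch only: the row above ends inside load_data(), so the rest of that
# pipeline is not shown. Assuming the CSV holds a label column (named 'target' here
# purely for illustration), a typical continuation could look like this.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
def fit_baseline_example(df: pd.DataFrame, label_col: str = 'target') -> RandomForestClassifier:
    # split features/label, hold out 20% for evaluation, and fit a forest baseline
    X = df.drop(columns=[label_col])
    y = df[label_col]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(X_train, y_train)
    print('held-out accuracy:', clf.score(X_test, y_test))
    return clf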
# -*- coding: utf-8 -*-
"""
Methods to perform coverage analysis.
@author: <NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
import geopandas as gpd
from typing import List, Optional
from shapely import geometry as geo
from datetime import datetime, timedelta
from skyfield.api import load, wgs84, EarthSatellite
from ..schemas.point import Point
from ..schemas.satellite import Satellite
from ..schemas.instrument import Instrument, DutyCycleScheme
from ..utils import (
compute_min_altitude,
swath_width_to_field_of_regard,
compute_max_access_time,
compute_orbit_period,
)
def collect_observations(
point: Point,
satellite: Satellite,
instrument: Instrument,
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect single satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellite: The observing satellite
:type satellite: :class:`tatc.schemas.satellite.Satellite`
:param instrument: The instrument used to make observations
    :type instrument: :class:`tatc.schemas.instrument.Instrument`
:param start: The start of the mission window
    :type start: :class:`datetime.datetime`
:param end: The end of the mission window
    :type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: An instance of :class:`geopandas.GeoDataFrame` containing all
        recorded observations
    :rtype: :class:`geopandas.GeoDataFrame`
"""
# build a topocentric point at the designated geodetic point
topos = wgs84.latlon(point.latitude, point.longitude)
# load the timescale and define starting and ending points
ts = load.timescale()
t0 = ts.from_datetime(start)
t1 = ts.from_datetime(end)
# load the ephemerides
eph = load("de421.bsp")
# convert orbit to tle
orbit = satellite.orbit.to_tle()
# construct a satellite for propagation
sat = EarthSatellite(orbit.tle[0], orbit.tle[1], satellite.name)
# compute the initial satellite height (altitude)
satellite_height = wgs84.subpoint(sat.at(t0)).elevation.m
# compute the minimum altitude angle required for observation
min_altitude = compute_min_altitude(
satellite_height,
instrument.field_of_regard
if sample_distance is None
else swath_width_to_field_of_regard(satellite_height, sample_distance),
)
# compute the maximum access time to filter bad data
max_access_time = timedelta(
seconds=compute_max_access_time(satellite_height, min_altitude)
)
# TODO: consider instrument operational intervals
ops_intervals = pd.Series(
[pd.Interval(pd.Timestamp(start), pd.Timestamp(end), "both")]
)
# find the set of observation events
t, events = sat.find_events(topos, t0, t1, altitude_degrees=min_altitude)
if omit_solar:
# basic dataframe without solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
}
)
else:
# extended dataframe including solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
"sat_sunlit": pd.Series([], dtype="bool"),
"solar_alt": pd.Series([], dtype="float64"),
"solar_az": pd.Series([], dtype="float64"),
"solar_time": pd.Series([], dtype="float64"),
}
)
# define variables for stepping through the events list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# check for geocentricity
    if len(events) > 0 and np.all(events == 1):
# find the satellite altitude, azimuth, and distance at t0
sat_alt, sat_az, sat_dist = (sat - topos).at(t[0]).altaz()
        # if omitting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": None,
"solar_alt": None,
"solar_az": None,
"solar_time": None
}, index=[0]
)
], ignore_index=True)
# compute the access time for the observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
for j in range(len(events)):
if events[j] == 0:
# record the rise time
t_rise = t[j].utc_datetime()
elif events[j] == 1:
# record the culmination time
t_culminate = t[j].utc_datetime()
# find the satellite altitude, azimuth, and distance
sat_alt, sat_az, sat_dist = (sat - topos).at(t[j]).altaz()
if not omit_solar or instrument.req_target_sunlit is not None:
# find the solar altitude, azimuth, and distance
solar_obs = (
(eph["earth"] + topos).at(t[j]).observe(eph["sun"]).apparent()
)
solar_alt, solar_az, solar_dist = solar_obs.altaz()
# find the local solar time
solar_time = solar_obs.hadec()[0].hours + 12
if not omit_solar or instrument.req_self_sunlit is not None:
# find whether the satellite is sunlit
sat_sunlit = sat.at(t[j]).is_sunlit(eph)
elif events[j] == 2:
# record the set time
t_set = t[j].utc_datetime()
# only record an observation if a previous rise and culminate
# events were recorded (sometimes they are out-of-order)
if t_rise is not None and t_culminate is not None:
# check if the observation meets minimum access duration,
# ground sunlit conditions, and satellite sunlit conditions
if (
instrument.min_access_time <= t_set - t_rise <= max_access_time * 2
and instrument.is_valid_observation(
eph,
ts.from_datetime(t_culminate),
sat.at(ts.from_datetime(t_culminate)),
)
and (
instrument.duty_cycle >= 1
or any(ops_intervals.apply(lambda i: t_culminate in i))
)
):
# if omitting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": sat_sunlit,
"solar_alt": solar_alt.degrees,
"solar_az": solar_az.degrees,
"solar_time": solar_time,
}, index=[0]
)
], ignore_index=True)
# reset the variables for stepping through the event list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# compute the access time for each observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def collect_multi_observations(
point: Point,
satellites: List[Satellite],
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect multiple satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellites: The observing satellites
:type satellites: list of :class:`tatc.schemas.satellite.Satellite`
:param start: The start of the mission window
    :type start: :class:`datetime.datetime`
:param end: The end of the mission window
:type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: an instance of :class:`geopandas.GeoDataFrame` containing all
recorded observations
:rtype: :class:`geopandas.GeoDataFrame`
"""
gdfs = [
collect_observations(
point, satellite, instrument, start, end, omit_solar, sample_distance
)
for constellation in satellites
for satellite in (constellation.generate_members())
for instrument in satellite.instruments
]
# merge the observations into one data frame
df = pd.concat(gdfs, ignore_index=True)
# sort the values by start datetime
df = df.sort_values("start")
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def aggregate_observations(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Aggregate constellation observations for a geodetic point of interest.
:param gdf: The individual observations.
:type gdf: :class:`geopandas.GeoDataFrame`
    :return: An instance of :class:`geopandas.GeoDataFrame` containing the
        aggregated observations.
:rtype: :class:`geopandas.GeoDataFrame`
"""
if len(gdf.index) == 0:
empty_df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"access": pd.Series([], dtype="timedelta64[ns]"),
"revisit": pd.Series([], dtype="timedelta64[ns]")
}
)
return gpd.GeoDataFrame(empty_df, geometry=empty_df.geometry, crs="EPSG:4326")
# sort the values by start datetime
df = gdf.sort_values("start")
# assign the observation group number based on overlapping start/end times
df["obs"] = (df["start"] > df["end"].shift().cummax()).cumsum()
if all(key in gdf.columns for key in ["solar_alt", "solar_az", "solar_time"]):
# reduce solar angles
df = df.groupby("obs").agg(
{
"point_id": "first",
"geometry": "first",
"start": "min",
"epoch": "first",
"end": "max",
"solar_alt": "mean",
"solar_az": "mean",
"solar_time": "mean",
"satellite": ", ".join,
"instrument": ", ".join,
}
)
else:
# reduce only core attributes
df = df.groupby("obs").agg(
{
"point_id": "first",
"geometry": "first",
"start": "min",
"epoch": "first",
"end": "max",
"satellite": ", ".join,
"instrument": ", ".join,
}
)
# compute the access time for each observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def reduce_observations(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Reduce constellation observations for a geodetic point of interest.
:param gdf: The aggregated observations
    :type gdf: :class:`geopandas.GeoDataFrame`
    :return: An instance of :class:`geopandas.GeoDataFrame` containing the
        reduced observations.
    :rtype: :class:`geopandas.GeoDataFrame`
"""
if len(gdf.index) == 0:
empty_df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"access": pd.Series([], dtype="timedelta64[ns]"),
"revisit": | pd.Series([], dtype="timedetla64[ns]") | pandas.Series |
import os
import errno
import joblib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import table
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
r2_score,
mean_squared_error,
)
from time import time
from calculate_stats import CalculateStats
from linear_regression import LinearRegression
from multilayer_perceptron import MultilayerPerceptron
from random_forest import RandomForest
DATASET_PATH = "../datasets/train.csv"
MODELS_DIR = "../models"
RESOURCES_PATH = "../resources/"
def clean_data(path):
data_frame = pd.read_csv(path)
# fill missing data for numeric features
numeric_features = data_frame.select_dtypes(include=[np.number])
for feature in numeric_features:
data_frame[feature].fillna(data_frame[feature].mean(), inplace=True)
# convert to numeric
non_numeric_features = data_frame.select_dtypes(exclude=[np.number])
for feature in non_numeric_features:
mapping = {value: i for i, value in enumerate(data_frame[feature].unique())}
data_frame[feature] = data_frame[feature].replace(
mapping.keys(), mapping.values()
)
    # disregard unimportant features
data_frame.drop(["Id"], axis=1, inplace=True)
save_file_name = os.path.dirname(path) + os.sep + "house_prices_cleaned.csv"
data_frame.to_csv(save_file_name, encoding="utf-8", index=False)
return save_file_name
def split_data(path):
data_frame = | pd.read_csv(path) | pandas.read_csv |
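# Added illustration (hedged): the categorical-to-integer mapping used in clean_data,
# shown on a toy frame with a made-up column name; Series.map gives the same result as
# the replace(...) call used above.
import pandas as pd
toy = pd.DataFrame({"Street": ["Pave", "Grvl", "Pave"]})
toy_mapping = {value: i for i, value in enumerate(toy["Street"].unique())}
toy["Street"] = toy["Street"].map(toy_mapping)
# toy["Street"].tolist() == [0, 1, 0]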
import random
import sys
sys.path.append("../utils")
from collections import Counter
import numpy as np
import pandas as pd
import torch
from data_prep import machine_translation
from sklearn.model_selection import train_test_split
from sklearn.utils import resample, shuffle
from torch.utils.data import DataLoader, TensorDataset
from transformers import (
AdamW,
AutoTokenizer,
BertForSequenceClassification,
get_linear_schedule_with_warmup,
logging,
)
from utils import language_model_preprocessing, translated_preprocessing
from train import (
accuracy_per_class,
evaluate_model,
f1_score_func,
test_model,
train_model,
)
########## HYPER-PARAMETERS ##########
SEED = 0
EPOCHS = 5
LEARNING_RATE = 2e-5
language_model_1 = "nlpaueb/bert-base-greek-uncased-v1"
language_model_2 = "xlm-roberta-base"
seq_length = 256
BATCH_SIZE = 8
use_sampling = True
classes = 3
######################################
# Control sources of randomness
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
logging.set_verbosity_error()
# Training device
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"Device for training: {device}")
# Load dataset
dataset = pd.read_csv("../data/dataset.csv", index_col=False, sep="\t")
# Map labels - convert to 3-classes problem
labels_dict = {"-2": 0, "-1": 0, "0": 1, "1": 2, "2": 2}
dataset["sentiment"] = dataset["sentiment"].astype(int).astype(str)
dataset["sentiment"] = dataset["sentiment"].map(labels_dict)
dataset = dataset.reset_index(drop=True)
# Dataset Preprocessing
dataset["text"] = dataset["text"].apply(language_model_preprocessing)
dataset["target"] = dataset["target"].apply(language_model_preprocessing)
# Discard rows where aspect is not in text
ids_to_drop = []
for index, row in dataset.iterrows():
if row["target"] not in row["text"]:
ids_to_drop.append(index)
dataset = dataset[~dataset.index.isin(ids_to_drop)]
dataset = dataset.reset_index(drop=True)
# Shuffle dataset
dataset = shuffle(dataset, random_state=SEED)
# Train-test split
train_data, test_data = train_test_split(
dataset, test_size=0.2, random_state=SEED, stratify=dataset["sentiment"].values
)
# Validation set
test_data, val_data = train_test_split(
test_data, train_size=0.5, random_state=SEED, stratify=test_data["sentiment"].values
)
print(f"Initial train-set class balance: {Counter(train_data['sentiment'])}")
print(f"Val-set class balance: {Counter(val_data['sentiment'])}")
print(f"Test-set class balance: {Counter(test_data['sentiment'])}")
if use_sampling:
m_0 = train_data[train_data["sentiment"] == 0] # 1671 samples
m_1 = train_data[train_data["sentiment"] == 1] # 4720 samples
m_2 = train_data[train_data["sentiment"] == 2] # 752 samples
m_2_fr = machine_translation(m_2, "mul", "en")
m_2 = pd.concat([m_2, m_2_fr])
m_2_fi = machine_translation(m_2, "el", "fr")
m_2 = pd.concat([m_2, m_2_fi])
m_0_fr = machine_translation(m_0, "mul", "en")
m_0 = | pd.concat([m_0, m_0_fr]) | pandas.concat |
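# Hedged continuation sketch (not from the original script): after augmenting the
# minority classes with machine-translated copies, a natural next step is to recombine
# the per-class frames, reshuffle, and re-check the class balance. The helper name and
# its use here are assumptions for illustration only.
from collections import Counter
import pandas as pd
from sklearn.utils import shuffle
def recombine_example(frames, seed=0):
    # concatenate the per-class frames, reset the index, and shuffle reproducibly
    balanced = pd.concat(frames).reset_index(drop=True)
    balanced = shuffle(balanced, random_state=seed)
    print("class balance after augmentation:", Counter(balanced["sentiment"]))
    return balanced
# e.g. train_data = recombine_example([m_0, m_1, m_2])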
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_lattice():
df = pd.DataFrame(data_lattice)
filename = os.path.join(
testdir, "tabula/icdar2013-dataset/competition-dataset-us/us-030.pdf"
)
tables = camelot.read_pdf(filename, pages="2")
assert_frame_equal(df, tables[0].df)
def test_lattice_table_rotated():
df = pd.DataFrame(data_lattice_table_rotated)
filename = os.path.join(testdir, "clockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
| assert_frame_equal(df, tables[0].df) | pandas.testing.assert_frame_equal |
import os
import pandas as pd
import glob
import pydicom
import numpy as np
from utilities.defines import TRAIN_DIR_STAGE_2
def get_sequence_clipping_order(seq_length):
indices = []
elem = 0
for idx, i in enumerate(reversed(range(seq_length))):
indices.append(elem)
if idx % 2 == 0:
elem += i
else:
elem -= i
return indices
def print_error(message):
c_red = '\033[95m'
c_end = '\033[0m'
print(c_red + message + c_end)
def get_csv_train(data_prefix=TRAIN_DIR_STAGE_2):
train_df = pd.read_csv(os.path.join(data_prefix, 'stage_2_train.csv'))
train_df[['ID', 'subtype']] = train_df['ID'].str.rsplit('_', 1,
expand=True)
train_df = train_df.rename(columns={'ID': 'id', 'Label': 'label'})
train_df = pd.pivot_table(train_df, index='id',
columns='subtype', values='label')
train_df.to_csv("labels_2.csv")
return train_df
def extract_csv_partition():
df = get_csv_train()
meta_data_train = combine_labels_metadata(TRAIN_DIR_STAGE_2)
negative, positive = df.loc[df['any'] == 0], df.loc[df['any'] == 1]
negative_study_uids = list(meta_data_train.query("any == 0")['StudyInstanceUID'])
indices = np.arange(min(len(negative_study_uids), len(positive.index)))
np.random.shuffle(indices)
negative_study_uids = np.array(negative_study_uids)[indices]
selected_negative_studies = meta_data_train.loc[meta_data_train['StudyInstanceUID'].isin(negative_study_uids)]
selected_negative_studies = selected_negative_studies.drop(
set(selected_negative_studies.columns).intersection(set(negative.columns)), axis=1)
negative = negative.merge(selected_negative_studies, how='left', on='id').dropna()
negative = negative.drop(selected_negative_studies.columns, axis=1)
return pd.concat([positive, negative])
def extract_metadata(data_prefix=TRAIN_DIR_STAGE_2):
filenames = glob.glob(os.path.join(data_prefix, "*.dcm"))
get_id = lambda p: os.path.splitext(os.path.basename(p))[0]
ids = map(get_id, filenames)
dcms = map(pydicom.dcmread, filenames)
columns = ['BitsAllocated', 'BitsStored', 'Columns', 'HighBit',
'Modality', 'PatientID', 'PhotometricInterpretation',
'PixelRepresentation', 'RescaleIntercept', 'RescaleSlope',
'Rows', 'SOPInstanceUID', 'SamplesPerPixel', 'SeriesInstanceUID',
'StudyID', 'StudyInstanceUID', 'ImagePositionPatient',
'ImageOrientationPatient', 'PixelSpacing']
meta_dict = {col: [] for col in columns}
for img in dcms:
for col in columns:
meta_dict[col].append(getattr(img, col))
meta_df = | pd.DataFrame(meta_dict) | pandas.DataFrame |
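# Hedged sketch: combine_labels_metadata is called in extract_csv_partition above but is
# not shown in this excerpt. A plausible (hypothetical) implementation merges the pivoted
# labels from get_csv_train with the DICOM attributes from extract_metadata on the SOP
# instance UID, assuming extract_metadata returns the assembled meta_df; the output file
# name is an assumption as well.
import os
import pandas as pd
def combine_labels_metadata_sketch(data_prefix=TRAIN_DIR_STAGE_2):
    labels_df = get_csv_train(data_prefix).reset_index()  # 'id' plus one column per subtype
    meta_df = extract_metadata(data_prefix)               # per-file DICOM attributes
    merged = labels_df.merge(meta_df, left_on='id', right_on='SOPInstanceUID', how='inner')
    merged.to_csv(os.path.join(data_prefix, 'labels_metadata.csv'), index=False)
    return merged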
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecast_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
    Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (dict): list of models actually available
full_models (dict): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
| pd.DataFrame() | pandas.DataFrame |
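# Added illustration (hedged): the two dictionary shapes distinguished by
# mosaic_or_horizontal/parse_horizontal earlier in this module. A horizontal template
# maps each series to a single model id; a mosaic template maps each series to a
# {forecast_period: model id} dict.
horizontal_example = {'series_a': 'model_1', 'series_b': 'model_2'}
mosaic_example = {'series_a': {0: 'model_1', 1: 'model_2'}, 'series_b': {0: 'model_2', 1: 'model_2'}}
# mosaic_or_horizontal(horizontal_example) -> 'horizontal'
# mosaic_or_horizontal(mosaic_example) -> 'mosaic'
# parse_horizontal(horizontal_example, model_id='model_2') -> ['series_b']
# parse_horizontal(mosaic_example, series_id='series_a') -> ['model_1', 'model_2'] (set order not guaranteed)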
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
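# Shift each column by a different amount under three column-label layouts;
# after relabelling, all three frames should be identical.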
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
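# Build the expected frame by re-taking the columns shifted right by two
# positions (the repeated -1 entries are placeholders), blanking the first
# two columns with NaN, and restoring the original column labels.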
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because the `take` call above consolidated the blocks
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame, frame_or_series):
# TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
# python3
# Libraries
import os
import sys
import re
import numpy as np
import pandas as pd
from collections import Counter
from Bio import SeqIO, motifs
from Bio.Seq import Seq
from scipy.stats import pearsonr, spearmanr, kstest, entropy
# Read input and output filenames from the command-line arguments
file_shape, file_muts, file_logo, filename_out = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
# Sequence logo and conservation score
TF_logo = pd.read_csv(file_logo, sep=" ", header=None, skiprows=[0])
TF_logo.pop(0)
#TF_conserve = (2 * np.max(TF_logo, axis=1) - np.sum(TF_logo, axis=1)).values
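# Per-position conservation score: relative entropy (KL divergence) of each
# logo column against a uniform 0.25 background distribution.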
TF_conserve = entropy(TF_logo, qk=np.full(np.shape(TF_logo), fill_value=0.25), axis=1)
# Define TF length
len_tf = len(TF_conserve)
# TFBS shape distribution
DF_pos_shape = pd.read_csv(file_shape)
# TFBS mutation ref and alt distribution
DF_pos_muts = pd.read_csv(file_muts, sep="\t", index_col=None, header=None)
DF_pos_muts.columns = ["chr", "start", "end", "mut", "MAF", "pos", "kmer_xtend", "kmer"]
# 5-mer reference DF
DF_strucval_5mersheet = pd.read_csv("ref_5mers_structure.csv", index_col=0)
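# Build the alternate-allele 5-mer and extended 7-mer for every mutation by
# substituting the ALT base at the centre position of the reference k-mers.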
temp_altks = [0] * len(DF_pos_muts)
temp_alt7 = [0] * len(DF_pos_muts)
for i in range(len(temp_altks)):
temp_kmer, temp_7mer = DF_pos_muts['kmer'][i].upper(), DF_pos_muts['kmer_xtend'][i].upper()
temp_alt = DF_pos_muts['mut'][i].split(">")[1]
temp_altks[i] = temp_kmer[0:2] + temp_alt + temp_kmer[3:5]
temp_alt7[i] = temp_7mer[0:3] + temp_alt + temp_7mer[4:7]
DF_pos_muts['kmer_alt'] = temp_altks
DF_pos_muts['kmer_alt_xtend'] = temp_alt7
DF_pos_muts.index = [item.upper() for item in DF_pos_muts['kmer'].values]
DF_pos_muts_ref = DF_pos_muts.join(DF_strucval_5mersheet, how="left")
DF_pos_muts_ref.sort_values(by=["pos", "kmer"], inplace=True)
DF_pos_muts.index = DF_pos_muts['kmer_alt']
DF_pos_muts_alt = DF_pos_muts.join(DF_strucval_5mersheet, how="left")
DF_pos_muts_alt.sort_values(by=["pos", "kmer"], inplace=True)
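# Count observed mutations at each motif position, filling unobserved
# positions with zero so every position of the TF is represented.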
temp_counter = Counter(DF_pos_muts_ref['pos'])
for i in range(len_tf):
if i not in temp_counter.keys():
temp_counter[i] = 0
DF_observed_mut = pd.DataFrame([temp_counter])
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
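# When both legs reference the same rate option, the helper is expected to
# leave the arguments unchanged.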
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test matching the SOFR maturity with the LIBOR leg and flipping legs to get the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED is not supported for FX; DELTA without a strike, and SPOT/FORWARD with strikes other than 100, should also raise
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
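# Regression check: feed canned implied-vol inputs and constituent weights
# from the test resources directory and compare against a precomputed
# expected series.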
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
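    # MockDate pins datetime.date.today() to 2019-05-25 so the relative-offset cases below are deterministic.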
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
def _mock_var_swap_data(_cls, q):
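    # Stand-in for GsDataApi.get_market_data: when the query asks for the 'Last'
    # measure it returns a single intraday point, otherwise a three-day daily
    # varSwap history tagged with the test dataset ids.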
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
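    # Like _mock_var_swap_fwd, but the historical branch only returns data for the
    # '1y' tenor, which exercises the "no data for a tenor" path in test_var_swap_fwd.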
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
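        # Return fixed two-point series for the '1m' and '2m' tenors and an empty
        # series for anything else, mimicking per-tenor var_swap results.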
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
        assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
import pandas as pd
# json for tests - represents raw case count json from api
test_case_json = { "01/01/2021" : {"casecount" : 1956, "casecountdate":"01/01/2021", "geoarea":"State of Utah", "retrieveddate" : "03/13/2021 11:00 AM"},
"01/02/2021" : {"casecount" : 1845, "casecountdate":"01/02/2021", "geoarea":"State of Utah", "retrieveddate" : "02/03/2021 02:00 PM"} }
# pandas data frame for tests - represents what is expected from DataGetter.build_df() from the raw json above
test_casecount_df = pd.DataFrame([[1956, "01/01/2021", "State of Utah", "03/13/2021 11:00 AM"],
[1845, "01/02/2021", "State of Utah", "02/03/2021 02:00 PM"]],
columns = ['casecount','casecountdate','geoarea', 'retrieveddate'])
test_casecount_df['casecountdate'] = pd.to_datetime(test_casecount_df['casecountdate'])
test_casecount_df = test_casecount_df.sort_values('casecountdate').reset_index(drop=True)
# json to use for testing - represents raw testing json data from api
test_testdata_json = { "01/01/2021" : {"geoarea":"State of Utah", "peoplepositive": 1888, "peopletested" : 5967, "retrieveddate" : "08/04/2021 02:01 PM", "testdate" : "01/01/2021"},
"01/02/2021" : {"geoarea" : "State of Utah", "peoplepositive" :1906, "peopletested":5627, "retrieveddate": "07/30/2021 02:01 PM", "testdate" : "01/02/2021"}}
# pandas data frame for tests - represents what is expected from DataGetter.build_df() from the raw json above
test_testing_df = pd.DataFrame([["State of Utah", 1888, 5967, "08/04/2021 02:01 PM", "01/01/2021"],
["State of Utah", 1906, 5627, "07/30/2021 02:01 PM", "01/02/2021"]],
columns = ['geoarea','peoplepositive','peopletested', 'retrieveddate','testdate'])
test_testing_df['testdate'] = pd.to_datetime(test_testing_df['testdate'])
test_testing_df = test_testing_df.sort_values('testdate').reset_index(drop=True)
# json to use for testing - represents raw icu utilization data for the top 16 hospitals provided by the api
test_icu_16_json = { "05/01/2021" : {"date":"05/01/2021", "icu-top16-hosp-covid-util":0.32,"icu-top16-hosp-total-util": 0.85,"retrieveddate":"01/03/2021 01:00 PM"},
"05/02/2021" : {"date": "05/02/2021","icu-top16-hosp-covid-util": 0.33, "icu-top16-hosp-total-util":0.86, "retrieveddate":"01/03/2021 01:00 PM"}}
# pandas data frame for tests - represents what is expected from DataGetter.build_df() from the raw json above
test_icu_df = pd.DataFrame([["01/01/2021", 0.32, 0.85,"01/03/2021 01:00 PM" ],
["01/02/2021",0.33,0.86,"01/03/2021 01:00 PM"]],
columns = ['date','icu-top16-hosp-covid-util','icu-top16-hosp-total-util', 'retrieveddate'])
test_icu_df['date'] = pd.to_datetime(test_icu_df['date'])
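# The three fixture pairs above follow the same pattern: a dict of records keyed by
# date becomes a DataFrame whose date column is parsed and sorted. The sketch below
# only illustrates that transformation; the real DataGetter.build_df is not defined
# in this file, so the helper name and signature here are assumptions.
def _example_build_df(raw_json, date_column):
    # One row per record, parse the date column, then sort chronologically.
    df = pd.DataFrame(list(raw_json.values()))
    df[date_column] = pd.to_datetime(df[date_column])
    return df.sort_values(date_column).reset_index(drop=True)
# For instance, _example_build_df(test_case_json, 'casecountdate') reproduces
# test_casecount_df above.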
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 00:05:28 2018
@author: <NAME>
"""
import os
import numpy as np
import pandas as pd
from datetime import datetime as dt
import itertools
loc = r'E:\ML Projects\Champions League Predictor\Premier league Stats\TrainingDataset'
raw_data_1 = pd.read_csv(loc + r'\EPL02-03.csv')
raw_data_2 = pd.read_csv(loc + r'\EPL03-04.csv')
raw_data_3 = pd.read_csv(loc + r'\EPL04-05.csv')
raw_data_4 = pd.read_csv(loc + r'\EPL05-06.csv')
# Import standard python packages
import numbers
import copy
import pandas as pd
import pathlib
import numpy as np
import sys
# EIA reports coal counties using the FIPS Codes for the county. The county can be a one, two, or three digit number.
# For standardization's sake, we convert them all to a three-digit number.
# This function takes one input: an array of FIPS county codes.
# This function returns one output: an array of three-digit FIPS county codes
# This function is used in the following codes: eia_coal_consumption_data.py
def convert_fips_county_three_digits(fips_codes):
fips_three = []
    # Each element is cast with int() inside the loop; casting the whole array with
    # int() would raise a TypeError, so iterate over the codes directly.
    for county_fips in fips_codes:
if len(str(int(county_fips))) == 1:
fips_three.append('00' + str(int(county_fips)))
elif len(str(int(county_fips))) == 2:
fips_three.append('0' + str(int(county_fips)))
elif len(str(int(county_fips))) == 3:
fips_three.append(str(int(county_fips)))
fips_three = pd.Series(fips_three)
fips_three = fips_three.values
return fips_three
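# Example (illustrative, not from the source data): pd.Series([1, 23, 456]) maps to
# the zero-padded strings ['001', '023', '456'] under the padding rules above.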
def convert_fips_state_two_digits(fips_codes):
fips_two = []
for state_fips in fips_codes:
if len(str(int(state_fips))) == 1:
fips_two.append('0' + str(int(state_fips)))
elif len(str(int(state_fips))) == 2:
fips_two.append(str(int(state_fips)))
fips_two = pd.Series(fips_two)
fips_two = fips_two.values
return fips_two
# FIPS county codes can be one to three digits. The standard way of reporting them is to report them with three digits
# with preceding zeros. This function adds the preceding zeros to the county codes in an array if necessary.
# It then combines the fips code with the state abbreviation.
# This function takes two inputs: a pandas array of FIPS county codes and a pandas array of state abbreviations.
# This function returns one output: a pandas array of State Abbreviation and FIPS county codes.
# This function is used in the following codes: CFPP_fuel_data_processing_2015.py, CFPP_fuel_data_processing.py
def fips_codes_state_county_codes(fips_county_codes, state_abbreviations):
i = 0
state_county_codes = []
while i < len(fips_county_codes):
if isinstance(fips_county_codes.iloc[i], numbers.Number):
code = int(fips_county_codes.iloc[i])
if fips_county_codes.iloc[i] / 100 >= 1:
state_county_codes.append(state_abbreviations.iloc[i] + ', ' + str(code))
elif fips_county_codes.iloc[i] / 10 >= 1:
state_county_codes.append(state_abbreviations.iloc[i] + ', 0' + str(code))
elif fips_county_codes.iloc[i] / 1 >= 0:
state_county_codes.append(state_abbreviations.iloc[i] + ', 00' + str(code))
else:
state_county_codes.append(state_abbreviations.iloc[i] + ', ' + str(fips_county_codes.iloc[i]))
i += 1
state_county_codes = pd.Series(state_county_codes)
state_county_codes = state_county_codes.values
return state_county_codes
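# Example (illustrative): fips_county_codes = pd.Series([36, 5, 123]) paired with
# state_abbreviations = pd.Series(['NY', 'AR', 'TX']) yields
# ['NY, 036', 'AR, 005', 'TX, 123']; non-numeric codes such as 'IMP' pass through
# unchanged, e.g. 'VA, IMP'.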
# EIA reports coal rank using three-letter abbreviations. COALQUAL reports everything using the full rank name.
# This function converts those three letter abbreviations to the full rank name (in all caps).
# This function takes one inputs: a pandas array of coal rank abbreviations.
# This function returns one output: a pandas array of coal ranks.
# This function is used in the following codes: CFPP_fuel_data_processing_2015.py, CFPP_fuel_data_processing.py
def rank_abbreviation_to_full_name(coal_rank_abbreviations):
i = 0
fuel_abbreviation = []
while i < len(coal_rank_abbreviations):
if coal_rank_abbreviations.iloc[i] == 'BIT':
fuel_abbreviation.append('BITUMINOUS')
elif coal_rank_abbreviations.iloc[i] == 'SUB':
fuel_abbreviation.append('SUBBITUMINOUS')
elif coal_rank_abbreviations.iloc[i] == 'LIG':
fuel_abbreviation.append('LIGNITE')
i += 1
fuel_abbreviation = pd.Series(fuel_abbreviation)
fuel_abbreviation = fuel_abbreviation.values
return fuel_abbreviation
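# Example (illustrative): pd.Series(['BIT', 'SUB', 'LIG']) maps to
# ['BITUMINOUS', 'SUBBITUMINOUS', 'LIGNITE']; any other abbreviation is skipped, so
# the output can be shorter than the input.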
# EIA and coal mine data includes both county names and county codes, but we need to create a merge key that has both
# these county identifiers and the relevant state. This function concatenates those fields.
# This function takes two inputs: two arrays to concatenate with a comma between them.
# This function returns one output: an array of the concatenated strings.
# This function is used in the following codes: eia_coal_consumption_data.py
def fips_code_county_name_state_concatenation(identifiers_1, identifiers_2):
concatenated_strings = []
i = 0
while i < len(identifiers_1):
        # Use "not isinstance" rather than the bitwise "~isinstance", which is always
        # truthy, so only non-string values are converted with str().
        if not isinstance(identifiers_1.iloc[i], str):
            identifier_1 = str(identifiers_1.iloc[i])
        else:
            identifier_1 = identifiers_1.iloc[i]
        if not isinstance(identifiers_2.iloc[i], str):
            identifier_2 = str(identifiers_2.iloc[i])
        else:
            identifier_2 = identifiers_2.iloc[i]
concatenated_strings.append(identifier_1 + ", " + identifier_2)
i += 1
concatenated_strings = pd.Series(concatenated_strings)
concatenated_strings = concatenated_strings.values
return concatenated_strings
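# Example (illustrative): pd.Series(['Boone', 107]) and pd.Series(['WV', 'KY'])
# yield ['Boone, WV', '107, KY']; non-string values are converted with str() first.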
def state_county_fips_code_concatenation(identifiers_1, identifiers_2):
concatenated_strings = []
i = 0
while i < len(identifiers_1):
        if not isinstance(identifiers_1.iloc[i], str):
            identifier_1 = str(identifiers_1.iloc[i])
        else:
            identifier_1 = identifiers_1.iloc[i]
        if not isinstance(identifiers_2.iloc[i], str):
            identifier_2 = str(identifiers_2.iloc[i])
        else:
            identifier_2 = identifiers_2.iloc[i]
concatenated_strings.append(identifier_1 + identifier_2)
i += 1
concatenated_strings = pd.Series(concatenated_strings)
concatenated_strings = concatenated_strings.values
return concatenated_strings
def state_code_to_abbreviation(series):
state_dic = {1:"AL", 2: 'AK', 3: 'IM', 4: 'AZ', 5: 'AR', 6: 'CA', 8: 'CO', 9: 'CT', 10: 'DE', 11: 'DC', 12: 'FL', 13: 'GA', 15: 'HI', 16: 'ID', 17: 'IL', 18: 'IN', 19: 'IA', 20: 'KS', 21: 'KY', 22: 'LA', 23: 'ME', 24: 'MD', 25: 'MA', 26: 'MI', 27: 'MN', 28: 'MS', 29: 'MO', 30: 'MT', 31: 'NE', 32: 'NV', 33: 'NH', 34: 'NJ', 35: 'NM', 36: 'NY', 37: 'NC', 38: 'ND', 39: 'OH', 40: 'OK', 41: 'OR', 42: 'PA', 44: 'RI', 45: 'SC', 46: 'SD', 47: 'TN', 48: 'TX', 49: 'UT', 50: 'VT', 51: 'VA', 53: 'WA', 54: 'WV', 55: 'WI', 56: 'WY'}
i = 0
temp = []
while i < len(series):
state = state_dic[series.iloc[i]]
temp.append(state)
i = i + 1
return pd.Series(temp)
def data_filtering(dataframe, capacity, outputfile):
    # Filter out plants that (1) don't use coal or (2) use imported coal (IMP) or waste coal (WC).
if type(dataframe.Fuel_Group.iloc[2]) != str:
dataframe = dataframe[dataframe.Fuel_Group == 1]
        # Assign on the filtered frame's index to avoid NaNs from index misalignment.
        dataframe.Fuel_Group = pd.Series(['Coal'] * len(dataframe), index=dataframe.index)
else:
dataframe = dataframe[dataframe.Fuel_Group == 'Coal']
#dataframe = dataframe[dataframe.Mine_County != 'IMP']
dataframe = dataframe[dataframe.Rank != 'WC']
dataframe = dataframe[(dataframe.Rank == 'BIT') | (dataframe.Rank == 'LIG') |(dataframe.Rank == 'SUB')]
    # Filter out plants that do not report a county or state that their coal is sourced from.
dataframe = dataframe[dataframe.Mine_County.notnull()]
dataframe = dataframe[dataframe.Mine_State.notnull()]
if type(dataframe.Mine_State.iloc[2]) != str:
dataframe.Mine_State = state_code_to_abbreviation(dataframe.Mine_State)
dataframe = dataframe[dataframe.Mine_State.notnull()]
#Filter out plants with capacity lower than 1MW
PlantsLessThanOne = []
Plants = dataframe.Plant_ID.unique().tolist()
PlantsLargerThanOne = copy.deepcopy(Plants)
for p in Plants:
temp = capacity[capacity.Plant_ID == p]
tempCapacity = temp.Capacity.tolist()
for c in tempCapacity:
if c < 1:
PlantsLessThanOne.append(p)
PlantsLargerThanOne.remove(p)
break
# #Do not filter out plants with boiler connecting to multiple generators
#Boilers connecting to multiple generators will be filtered out at boiler level
# PlantsLeft = []
# for p in PlantsLargerThanOne:
# temp = generator[generator.Plant_ID == p]
# tempBoiler = temp.Boiler_ID.unique().tolist()
# tempGenerator = temp.Generator_ID.unique().tolist()
# if len(tempBoiler) >= len(tempGenerator):
# PlantsLeft.append(p)
Qualifed = {'Plant_ID':PlantsLargerThanOne}
Qualifed = pd.DataFrame(Qualifed)
dataframe = Qualifed.merge(dataframe, how='left', on='Plant_ID')
dataframe = dataframe.sort_values(by = ["Month",'Plant_ID'])
dataframe = dataframe[['Month','Plant_ID', 'Rank', 'Fuel_Group', 'Mine_State', 'Mine_County',
'Quantity', 'Heat_Content']]
filteredOut = [x for x in Plants if x not in PlantsLargerThanOne]
for b in filteredOut:
dataframe = dataframe[dataframe.Plant_ID != b]
# Qualifed = {'Plant_ID':PlantsLargerThanOne}
# Qualifed = pd.DataFrame(Qualifed)
# print(Qualifed)
# print(dataframe)
# dataframe = Qualifed.merge(dataframe, how='left', on='Plant_ID')
# dataframe = dataframe.sort_values(by = ["Month",'Plant_ID'])
# dataframe = dataframe[['Month','Plant_ID', 'Rank', 'Fuel_Group', 'Mine_State', 'Mine_County',
# 'Quantity', 'Heat_Content']]
    # The FIPS code can be anywhere from one digit to three digits. This function converts it to a uniform three digits and
    # adds the state abbreviation to create a string uniquely identifying each county.
dataframe['FIPS_Code_State'] = fips_codes_state_county_codes(dataframe.Mine_County,
dataframe.Mine_State)
# EIA reports the fuel consumed using a three letter abbreviation (i.e. "BIT", "SUB", and "LIG"). COALQUAL uses the
# full name in all capital letters. This function converts the EIA code to the COALQUAL rank name.
dataframe['Rank'] = rank_abbreviation_to_full_name(dataframe.Rank)
PlantsLessThanOne = PlantsLessThanOne + [0]*(len(dataframe['Rank'])-len(PlantsLessThanOne))
dataframe["Capacity_LessThan1"] = pd.Series(PlantsLessThanOne)
print(dataframe)
dataframe.to_csv(outputfile)
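# A hedged example of how data_filtering might be invoked; the file names below are
# hypothetical placeholders, and the capacity table is assumed to carry the Plant_ID and
# Capacity columns used above:
# fuel_receipts = pd.read_excel('example_EIA923_fuel_receipts.xlsx')
# plant_capacity = pd.read_excel('example_EIA860_generator_capacity.xlsx')
# data_filtering(fuel_receipts, plant_capacity, 'example_filtered_fuel_data.csv')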
def merge_generator_boiler_data(boiler_data, boiler_fuel_data, generator_generation_data, generator_capacity_data, go_between_table):
# Start by creating the plant_boiler column for the boiler data table
i = 0
plant_boiler_combination = []
while i < len(boiler_data['Plant_ID']):
plant_boiler_combination.append(str(int(boiler_data['Plant_ID'].iloc[i])) + '_' +
str(boiler_data['Boiler_ID'].iloc[i]))
i += 1
plant_boiler_combination = pd.Series(plant_boiler_combination)
boiler_data['Plant_Boiler'] = plant_boiler_combination.values
    # Do the same to create the plant_boiler column for the boiler fuel data table
i = 0
plant_boiler_combination = []
while i < len(boiler_fuel_data['Plant_ID']):
plant_boiler_combination.append(str(int(boiler_fuel_data['Plant_ID'].iloc[i])) + '_' +
str(boiler_fuel_data['Boiler_ID'].iloc[i]))
i += 1
plant_boiler_combination = pd.Series(plant_boiler_combination)
boiler_fuel_data['Plant_Boiler'] = plant_boiler_combination.values
# Then create the plant_generator column for the generation data table
i = 0
plant_generator_combination = []
while i < len(generator_generation_data['Plant_ID']):
plant_generator_combination.append(str(int(generator_generation_data['Plant_ID'].iloc[i])) + '_' +
str(generator_generation_data['Generator_ID'].iloc[i]))
i += 1
    plant_generator_combination = pd.Series(plant_generator_combination)
import numpy as np
import io
import os
import psycopg2
import flask
from flask import Flask, request, jsonify, render_template, Response
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
import snscrape.modules.twitter as sntwitter
import pandas as pd
from nltk.tokenize import TweetTokenizer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from tqdm import tqdm
import string
import re as re
from bokeh.plotting import figure, output_file, show
from bokeh.embed import components
import base64
#Initializing the application name [here, the name is app]
app = Flask(__name__)
DATABASE_URL = 'postgresql://yhvwlkefgakryo:64548ee24c94aa91c69a7360e787dce102b33cf0a69a1c5aaa984831f72fbe39@ec2-54-166-37-125.compute-1.amazonaws.com:5432/d7sq0s42rmtm2j'
#Loading the model created in model.py
#model = pickle.load(open('model.pkl', 'rb'))
#Starting the app by rendering the index.html page
@app.route('/')
def home():
return render_template('index.html')
@app.route('/plot',methods=['GET'])
def plot_png():
    con = psycopg2.connect(DATABASE_URL)
cur = con.cursor()
query = f"""SELECT * FROM DATABASE"""
results = pd.read_sql(query, con)
fig = Figure()
k = pd.read_csv("Twitter_stock_final_dataset.csv")
k["Date"] = pd.to_datetime(k[['Day','Month','Year']])
k.index=k.Date
A = k.groupby(by='StockName').get_group("apple")
B = k.groupby(by='StockName').get_group("microsoft")
import matplotlib.pyplot as plt
# fig = plt.figure(figsize=(20,8))
axis = fig.add_subplot(1, 1, 1)
xs = A.index
ys = A.Close
axis.plot(xs, ys)
# plt.title('Apple Stock Price')
# plt.xlabel('Year')
# plt.ylabel("Stock Price in $")
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
# my_plot_div = plot([Scatter(x=A.index, y=A.Close)], output_type='div')
return Response(output.getvalue(), mimetype='image/png')
#Calling the prediction function using the POST method
@app.route('/predict',methods=['POST'])
def predict():
since = request.form['since_date']
until = request.form['until_date']
hashtag1 = request.form['StockName']
old_df = pd.read_csv("Twitter_stock_final_dataset.csv")
old_df["Date"] = pd.to_datetime(old_df[['Day','Month','Year']])
old_df.index=old_df.Date
sc1 = StandardScaler()
sc2 = StandardScaler()
sc3 = StandardScaler()
sc4 = StandardScaler()
sc5 = StandardScaler()
old_df.iloc[:,9] = sc1.fit_transform(np.array(old_df.iloc[:,9]).reshape(-1,1))
old_df.iloc[:,8] = sc2.fit_transform(np.array(old_df.iloc[:,8]).reshape(-1,1))
old_df.iloc[:,10] = sc3.fit_transform(np.array(old_df.iloc[:,10]).reshape(-1,1))
old_df.iloc[:,11] = sc4.fit_transform(np.array(old_df.iloc[:,11]).reshape(-1,1))
old_df.iloc[:,12] = sc5.fit_transform(np.array(old_df.iloc[:,12]).reshape(-1,1))
from sklearn.preprocessing import LabelEncoder
le1 = LabelEncoder()
le2 = LabelEncoder()
le3 = LabelEncoder()
old_df.Year = le1.fit_transform(old_df.Year)
old_df.StockName = le2.fit_transform(old_df.StockName)
old_df.Day_of_week = le3.fit_transform(old_df.Day_of_week)
# print(old_df.iloc[0,:])
# d = pd.DataFrame()
# d = pd.get_dummies(old_df1.Year, prefix=None, prefix_sep='_', dummy_na=False)
# old_df1 = pd.concat([old_df1,d], axis=1)
# old_df1.drop(['Year'], axis=1, inplace=True)
# d = pd.DataFrame()
# d = pd.get_dummies(old_df1.Day_of_week, prefix=None, prefix_sep='_', dummy_na=False)
# old_df1 = pd.concat([old_df1,d], axis=1)
# old_df1.drop(['Day_of_week'], axis=1, inplace=True)
old_df.drop(["Date","Total Tweets"], axis=1, inplace=True)
X = np.array(old_df.drop(["Close"],1))
y = np.array(old_df.Close)
from sklearn.ensemble import RandomForestRegressor
rf_2 = RandomForestRegressor(bootstrap=True, max_depth=80, max_features='sqrt', min_samples_leaf=3, min_samples_split=8, n_estimators=1000, random_state=1)
rf_2.fit(X,y)
if hashtag1=="apple":
hashtag="AAPL"
elif hashtag1=="microsoft":
hashtag="MSFT"
elif hashtag1=="nvidia":
hashtag="NVDA"
elif hashtag1=="paypal":
hashtag="PYPL"
elif hashtag1=="tesla":
hashtag="TSLA"
def scraper(since, until, hashtag):
import pandas as pd
import snscrape.modules.twitter as sntwitter
tweets_list2 = []
data=pd.DataFrame()
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('#'+str(hashtag)+' since:'+str(since)+' until:'+str(until)+' lang:en').get_items()): # if i>5000:
tweets_list2.append([tweet.date, tweet.content])
# Creating a dataframe from the tweets list above
data = pd.DataFrame(tweets_list2, columns=['Datetime', 'Text'])
data["Stockname"] = str(hashtag)
date = until
return [data, date, hashtag]
data, date, hashtag = scraper(since, until, hashtag)
df = data
def split_date_time(series):
L1=[]
L2=[]
for i in range(len(series)):
date, time = str(df["Datetime"][i]).split(' ')
L1.append(date)
L2.append(time)
df_1 = pd.DataFrame()
df_1["Date"] = L1
df_1["Time"] = L2
return df_1
df_1=split_date_time(df["Datetime"])
df = df.merge(df_1, right_index=True, left_index=True)
df.drop('Datetime', axis=1, inplace=True)
df.drop('Time', axis=1, inplace=True)
def pre_process(df):
column=df["Text"]
column = column.str.lower() # Lower Case
column = column.apply(lambda x: re.sub(r'https?:\/\/\S+', '', x)) # URL links
column = column.apply(lambda x: re.sub(r"www\.[a-z]?\.?(com)+|[a-z]+\.(com)", '', x)) # URL Links
column = column.apply(lambda x: re.sub(r'{link}', ' ', x)) # Placeholders
column = column.apply(lambda x: re.sub(r"\[video\]", ' ', x)) # Placeholders
column = column.apply(lambda x: re.sub(r'&[a-z]+;', ' ', x)) # HTML Functions
column = column.apply(lambda x: re.sub(r"[^a-z\s\(\-:\)\\\/\];='#]", ' ', x)) # Non Letters
column = column.apply(lambda x: re.sub(r'@mention', ' ', x)) # Mentions
column = column.apply(lambda x: re.sub(r'\n', ' ', x)) # \n
column = column.apply(lambda x: re.sub(r'-', '', x)) # -
column = column.apply(lambda x: re.sub(r'(\s)#\w+', ' ', x)) # remove word starting from hashtag
return column
column=pre_process(df)
df["clean_text"] = column
def tokenizer(df):
column = df["clean_text"]
tknzr = TweetTokenizer()
column = column.apply(tknzr.tokenize)
PUNCUATION_LIST = list(string.punctuation)
def remove_punctuation(word_list):
"""Remove punctuation tokens from a list of tokens"""
return [w for w in word_list if w not in PUNCUATION_LIST]
df['tokens'] = column.apply(remove_punctuation)
return df
df = tokenizer(df)
import nltk
#nltk.download('stopwords')
from nltk.corpus import stopwords
def remove_stopwords(x):
return [y for y in x if y not in stopwords.words('english')]
df['temp_list1'] = df['tokens'].apply(lambda x : remove_stopwords(x))
from nltk.corpus import stopwords
    stopwords = {'rt'}
stopwords.update(['retweet', 'RT', 'Retweet', 'RETWEET', 'rt', 'plz','#aapl','aapl','#msft','msft', 'tsla','tesla','stock','#tsla','elonmusk','apple','#wallstreetbets','reddit','wsbchairman','aapl','#aapl','microsoft'])
l=[]
for i in df.temp_list1:
t = " ".join(review for review in i)
l.append(t)
df["temp_list2"] = l
# textt = " ".join(review for review in df.temp_list2)
sid = SentimentIntensityAnalyzer()
ss=[]
for k in tqdm(df.temp_list2):
# print(k)
ss.append(sid.polarity_scores(k))
neg=[]
pos=[]
neu=[]
compound=[]
for i in tqdm(range(df.temp_list2.shape[0])):
neg.append(ss[i]["neg"])
pos.append(ss[i]["pos"])
neu.append(ss[i]["neu"])
compound.append(ss[i]["compound"])
    sia_table = pd.DataFrame()
# Import dependencies.
import json
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
import psycopg2
import time
from config import db_password
# Define a variable file_dir for the directory that’s holding our data.
file_dir = 'C:/Users/User/Desktop/Class/Goddard_Shannon_Movies-ETL'
# Open the Wikipedia JSON file to be read into the variable file, and use json.load() to save the data to a new variable.
with open(f'{file_dir}/data/wikipedia-movies.json', mode='r') as file:
wiki_movies_raw = json.load(file)
# Pull Kaggle data into Pandas DataFrames directly.
kaggle_metadata = pd.read_csv(f'{file_dir}/data/movies_metadata.csv', low_memory=False)
ratings = pd.read_csv(f'{file_dir}/data/ratings.csv')
# Create a list comprehension with the filter expression we created
# Save that to an intermediate variable wiki_movies.
wiki_movies = [movie for movie in wiki_movies_raw
if ('Director' in movie or 'Directed by' in movie)
and 'imdb_link' in movie
and 'No. of episodes' not in movie] # Filter TV shows out of movies.
# Create wiki_movies DataFrame.
wiki_movies_df = pd.DataFrame(wiki_movies_raw)
wiki_movies_df.head()
def clean_movie(movie):
movie = dict(movie) #create a non-destructive copy
alt_titles = {}
# combine alternate titles into one list
for key in ['Also known as','Arabic','Cantonese','Chinese','French',
'Hangul','Hebrew','Hepburn','Japanese','Literally',
'Mandarin','McCune-Reischauer','Original title','Polish',
'Revised Romanization','Romanized','Russian',
'Simplified','Traditional','Yiddish']:
if key in movie:
alt_titles[key] = movie[key]
movie.pop(key)
if len(alt_titles) > 0:
movie['alt_titles'] = alt_titles
# merge column names
def change_column_name(old_name, new_name):
if old_name in movie:
movie[new_name] = movie.pop(old_name)
change_column_name('Adaptation by', 'Writer(s)')
change_column_name('Country of origin', 'Country')
change_column_name('Directed by', 'Director')
change_column_name('Distributed by', 'Distributor')
change_column_name('Edited by', 'Editor(s)')
change_column_name('Length', 'Running time')
change_column_name('Original release', 'Release date')
change_column_name('Music by', 'Composer(s)')
change_column_name('Produced by', 'Producer(s)')
change_column_name('Producer', 'Producer(s)')
change_column_name('Productioncompanies ', 'Production company(s)')
change_column_name('Productioncompany ', 'Production company(s)')
change_column_name('Released', 'Release Date')
change_column_name('Release Date', 'Release date')
change_column_name('Screen story by', 'Writer(s)')
change_column_name('Screenplay by', 'Writer(s)')
change_column_name('Story by', 'Writer(s)')
change_column_name('Theme music composer', 'Composer(s)')
change_column_name('Written by', 'Writer(s)')
return movie
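# A small sketch of clean_movie on a single made-up record (keys here are illustrative,
# not taken from the real Wikipedia dump): alternate-title keys are folded into
# 'alt_titles' and legacy column names are renamed.
def _example_clean_movie():
    raw = {'title': 'Example Film', 'Directed by': 'A. Director', 'Japanese': '例'}
    cleaned = clean_movie(raw)
    # cleaned -> {'title': 'Example Film', 'alt_titles': {'Japanese': '例'},
    #             'Director': 'A. Director'}
    return cleaned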
# Rerun our list comprehension to clean wiki_movies and recreate wiki_movies_df.
clean_movies = [clean_movie(movie) for movie in wiki_movies]
wiki_movies_df = pd.DataFrame(clean_movies)
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
tm.assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError, match=msg):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
tm.assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
tm.assert_series_equal(result, expected)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
tm.assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
tm.assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert s.shape == rs.shape
assert rs is not s
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
tm.assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
tm.assert_series_equal(rs, expected)
def test_where_non_keyword_deprecation():
# GH 41485
s = Series(range(5))
msg = (
"In a future version of pandas all arguments of "
"Series.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.where(s > 1, 10, False)
expected = Series([10, 10, 2, 3, 4])
tm.assert_series_equal(expected, result)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where(1)
with pytest.raises(ValueError, match=msg):
s.where(cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
tm.assert_series_equal(s, expected)
# failures
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[[True, False]] = [0, 2, 3]
msg = (
"NumPy boolean array indexing assignment cannot assign 0 input "
"values to the 1 output values where the mask is true"
)
with pytest.raises(ValueError, match=msg):
s[[True, False]] = []
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
],
)
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
    s = Series([1, 2, 3])
#!/usr/bin/env python
# coding: utf-8
#Imports
import pandas as pd
#Set the input variable
sample_input1 = '''8
2017-01-03,16:18:50,AAPL,142.64
2017-01-03,16:25:22,AMD,13.86
2017-01-03,16:25:25,AAPL,141.64
2017-01-03,16:25:28,AMZN,845.61
2017-01-03,16:28:50,AAPL,140.64
2017-01-03,16:29:59,FB,140.34
2017-01-04,16:29:32,AAPL,143.64
2017-01-04,16:30:50,AAPL,141.64'''
sample_input2 = '''10
2017-01-03,16:18:50,AAPL,142.64
2017-01-03,16:25:22,AMD,13.80
2017-01-03,16:25:22,AMD,13.88
2017-01-03,16:25:22,AMD,13.86
2017-01-03,16:25:25,AAPL,141.64
2017-01-03,16:25:28,AMZN,845.61
2017-01-03,16:28:50,AAPL,140.64
2017-01-03,16:29:59,FB,140.34
2017-01-04,16:29:32,AAPL,143.64
2017-01-04,16:30:50,AAPL,141.64'''
def pre_process_to_df(input_text):
#Drop the first line which is not required
sample_input_trimmed = input_text[input_text.find('\n')+1:]
df = pd.DataFrame([x.split(',') for x in sample_input_trimmed.split('\n')])
df.columns = ['Date','Time','Symbol','Price']
df['DateTime'] = df['Date'] + ' ' + df['Time']
df['DateTime'] = pd.to_datetime(df['DateTime'], format='%Y-%m-%d %H:%M:%S')
    df = df.drop(columns=['Date', 'Time'])
#Drop the transactions out of 09:30:00 hrs and 16:30:00 trading window
df = (df.set_index('DateTime')
.between_time('09:30:00', '16:30:00')
.reset_index()
.reindex(columns=df.columns))
df = df.sort_values(by=['DateTime'])
return df
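# A quick usage sketch with the sample input defined above (illustrative only):
# quotes_df = pre_process_to_df(sample_input1)
# calculate_day_wise_ticker(quotes_df)  # defined below; prints a per-day summary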
def calculate_day_wise_ticker(df):
day_wise_transactions = [v for k, v in df.groupby(pd.Grouper(key='DateTime',freq='D'))]
for i in day_wise_transactions:
i = i.sort_values(by=['Symbol'])
print('Trading Day = ',str(i.DateTime.max())[:10])
print('Last Quote Time = ',str(i.DateTime.max())[11::])
print('Number of valid quotes = ',i.shape[0])
print('Most active hour = ', i.DateTime.dt.hour.mode()[0])
print('Most active symbol = ',i.Symbol.mode()[0])
        day_symbol_wise_transactions = [v for k, v in i.groupby(pd.Grouper(key='Symbol'))]
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
def transform_data(dataset, window):
"""
Transform MTS dataset to tabular dataset
Parameters
----------
dataset: string
Name of the dataset
window: float
Size of the time window
Returns
-------
sets: array
Train and test sets
"""
def transform_mts_features(X_mts, X_mts_transformed, line, timestamp, mts_length, window_size, n_features):
"""Add features from previous timestamps"""
X_mts_transformed[line, (3+n_features+(timestamp-1)*n_features):(3+2*n_features+(timestamp-1)*n_features)] = X_mts[(line+window_size-1-timestamp), 3:]
return X_mts_transformed
def transform_mts_line(X_mts, X_mts_transformed, line, mts_length, window_size, n_features):
"""Transform MTS per timestamp"""
return [transform_mts_features(X_mts, X_mts_transformed, line, timestamp, mts_length, window_size, n_features) for timestamp in range(1, window_size)][0]
def transform_mts(X, mts, mts_length, window_size, n_features):
"""Transform MTS"""
X_mts = np.array(X[X.id==mts])
X_mts_transformed = np.empty((mts_length-window_size+1, 3+window_size*n_features), dtype=object)
X_mts_transformed[:,:X_mts.shape[1]] = X_mts[window_size-1:,:]
X_mts_transformed = [transform_mts_line(X_mts, X_mts_transformed, line, mts_length, window_size, n_features) for line in range(0, X_mts_transformed.shape[0])]
return X_mts_transformed[0]
# Load input data
path = './data/'+dataset
df_train = pd.read_parquet(path+'/train.parquet')
df_test = pd.read_parquet(path+'/test.parquet')
# Collect input data information
mts_length = df_train.loc[:,['id', 'timestamp']].groupby(['id']).count().reset_index(drop=True).max()[0]
window_size = int(mts_length*window)
n_features = df_train.iloc[:,2:-1].shape[1]
# Transform train and test sets
train = pd.concat([df_train.target, df_train.iloc[:,:-1]], axis=1)
test = pd.concat([df_test.target, df_test.iloc[:,:-1]], axis=1)
train = [transform_mts(train, mts, mts_length, window_size, n_features) for mts in np.unique(train.id)]
train = np.concatenate(train, axis=0)
test = [transform_mts(test, mts, mts_length, window_size, n_features) for mts in np.unique(test.id)]
test = np.concatenate(test, axis=0)
# Separate X and y
X_train = train[:,1:]
y_train = train[:,0]
X_test = test[:,1:]
y_test = test[:,0]
return X_train, y_train, X_test, y_test
def load_data(dataset, window):
"""
Import train and test sets
Parameters
----------
dataset: string
Name of the dataset
window: float
Size of the time window
Returns
-------
sets: array
Train and test sets
"""
path = './data/'+dataset+'/transformed/'+str(int(window*100))
if not os.path.exists(path):
# Transform the dataset and save it
os.makedirs(path)
X_train, y_train, X_test, y_test = transform_data(dataset, window)
np.save(path+'/X_train.npy', X_train)
np.save(path+'/y_train.npy', y_train)
np.save(path+'/X_test.npy', X_test)
np.save(path+'/y_test.npy', y_test)
else:
# Load existing transformed dataset
X_train = np.load(path+'/X_train.npy', allow_pickle=True)
y_train = np.load(path+'/y_train.npy', allow_pickle=True)
X_test = np.load(path+'/X_test.npy', allow_pickle=True)
y_test = np.load(path+'/y_test.npy', allow_pickle=True)
return X_train, y_train, X_test, y_test
def import_data(dataset, window, xp_dir, val_split=[3, 1], log=print):
"""
Generate train, validation and test sets
Parameters
----------
dataset: string
Name of the dataset
window: float
Size of the time window
xp_dir: string
Folder of the experiment
val_split: array
Number of folds and the selected one
log: string
Processing of the outputs
Returns
-------
sets: array
Train, validation and test sets
"""
# Load train and test sets
X_train, y_train, X_test, y_test = load_data(dataset, window)
# Print input data information
classes, y = np.unique(y_train, return_inverse=True)
mts_length = (len(X_train)/len(np.unique(X_train[:,0]))-1)/(1-window)
window_size = window*mts_length
n_features = (X_train.shape[1]-2)/window_size
log('Number of MTS in train set: {0}'.format(len(np.unique(X_train[:,0]))))
log('Number of MTS in test set: {0}'.format(len(np.unique(X_test[:,0]))))
log('Number of classes: {0}'.format(len(classes)))
log('MTS length: {0}'.format(int(mts_length)))
log('Window size: {0}'.format(int(window_size)))
log('Number of features: {0}'.format(int(n_features)))
# Generate train/validation split
    df_split = pd.concat([pd.DataFrame(X_train[:,0]), pd.DataFrame(y)], axis=1)
import pandas as pd
from com.designingnn.rl.State import State
class QValues:
def __init__(self):
self.q = {}
def load_q_values(self, q_csv_path):
self.q = {}
        q_csv = pd.read_csv(q_csv_path)
import pandas as pd
from enum import Enum
class EQUI(Enum):
EQUIVALENT = 1
DIF_CARDINALITY = 2
DIF_SCHEMA = 3
DIF_VALUES = 4
"""
UTILS
"""
def most_likely_key(df):
res = uniqueness(df)
res = sorted(res.items(), key=lambda x: x[1], reverse=True)
return res[0]
def uniqueness(df):
res = dict()
for c in df.columns:
total = len(df[c])
unique = len(df[c].unique())
uniqueness = float(unique)/float(total)
res[c] = uniqueness
return res
def curate_view(df):
df = df.dropna() # drop nan
df = df.drop_duplicates()
# this may tweak indexes, so need to reset that
df = df.reset_index(drop=True)
# make sure it's sorted according to some order
df.sort_index(inplace=True, axis=1)
df.sort_index(inplace=True, axis=0)
return df
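# A small illustrative check of the utilities above on a made-up two-column view:
def _example_view_utils():
    view = pd.DataFrame({'id': [1, 2, 2, None], 'city': ['a', 'a', 'a', 'b']})
    curated = curate_view(view)            # drops the NaN row and the duplicated row
    key, score = most_likely_key(curated)  # -> ('id', 1.0); 'city' scores only 0.5
    return curated, key, score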
"""
VIEW CLASSIFICATION FUNCTIONS
"""
def equivalent(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
if len(v1) != len(v2):
return False, EQUI.DIF_CARDINALITY
if len(v1.columns) != len(v2.columns):
return False, EQUI.DIF_SCHEMA
if not len(set(v1.columns).intersection(set(v2.columns))) == len(v1.columns):
return False, EQUI.DIF_SCHEMA # dif attributes
for c in v1.columns:
s1 = v1[c].apply(lambda x: str(x).lower()).sort_values().reset_index(drop=True)
s2 = v2[c].apply(lambda x: str(x).lower()).sort_values().reset_index(drop=True)
idx = (s1 == s2)
if not idx.all():
return False, EQUI.DIF_VALUES
return True, EQUI.EQUIVALENT
def contained(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
if len(v1) > len(v2):
l = v1
s = v2
elif len(v2) > len(v1):
l = v2
s = v1
elif len(v1) == len(v2):
for c in v1.columns:
tv1 = v1[c].apply(lambda x: str(x).lower())
tv2 = v2[c].apply(lambda x: str(x).lower())
v12 = len(set(tv1) - set(tv2))
v21 = len(set(tv2) - set(tv1))
if v12 > 0:
return False, v12
elif v21 > 0:
return False, v21
return True
for c in l.columns:
print(c)
small_set = s[c].apply(lambda x: str(x).lower())
large_set = l[c].apply(lambda x: str(x).lower())
dif = set(small_set) - set(large_set)
print(str(len(small_set)) + " - " + str(len(large_set)))
if len(dif) > 0:
return False, len(dif)
return True
def complementary(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
k1 = most_likely_key(v1)[0]
k2 = most_likely_key(v2)[0]
s1 = set(v1[k1])
s2 = set(v2[k2])
s12 = (s1 - s2)
sdiff = set()
if len(s12) > 0:
sdiff.update((s12))
s21 = (s2 - s1)
if len(s21) > 0:
sdiff.update((s21))
if len(sdiff) == 0:
return False
return True, sdiff
def contradictory(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
k1 = most_likely_key(v1)[0]
k2 = most_likely_key(v2)[0]
vg1 = v1.groupby([k1])
vg2 = v2.groupby([k2])
vref = None
voth = None
if len(vg1.groups) > len(vg2.groups):
vref = vg1
voth = vg2
else:
vref = vg2
voth = vg1
contradictions = []
for gn, gv in vref:
v = voth.get_group(gn)
are_equivalent, equivalency_type = equivalent(gv, v)
if not are_equivalent:
contradictions.append((k1, k2, gn))
# print(contradictions)
# break
if len(contradictions) == 0:
return False
return True, len(contradictions)
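# A sketch of how the classification functions above might be exercised on tiny
# hypothetical views (purely illustrative data):
def _example_view_classification():
    v1 = pd.DataFrame({'id': [1, 2, 3], 'val': ['x', 'y', 'z']})
    v2 = pd.DataFrame({'id': [3, 2, 1], 'val': ['z', 'y', 'x']})
    v3 = pd.DataFrame({'id': [1, 2, 4], 'val': ['x', 'y', 'w']})
    same, label = equivalent(v1, v2)  # True, EQUI.EQUIVALENT (row order is ignored)
    extra = complementary(v1, v3)     # (True, {3, 4}): symmetric difference of the keys
    return same, label, extra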
def inconsistent_value_on_key(df1, df2, key=None):
missing_keys = []
non_unique_df1 = set()
non_unique_df2 = set()
conflicting_pair = []
cols = df1.columns # should be same in both df1 and df2
for key_value in df1[key]:
row1 = df1[df1[key] == key_value]
row2 = df2[df2[key] == key_value]
if len(row1) == 0 or len(row2) == 0:
missing_keys.append(key_value)
continue
do_continue = False
if len(row1) > 1:
non_unique_df1.add(key_value)
do_continue = True
if len(row2) > 1:
non_unique_df2.add(key_value)
do_continue = True
if do_continue:
continue
for c in cols:
if len(row1[c]) > 0 and len(row2[c]) > 0:
val_1 = row1[c].values
val_2 = row2[c].values
                if val_1 != val_2 and not pd.isnull(val_1) and not pd.isnull(val_2):
                    conflicting_pair.append((key_value, c, val_1, val_2))
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
#
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
#
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
#
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
assert_series_equal(msData.sampleMetadata['Instrument'], instrument)
##
#
##
reRun = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'b',
'',
'q',
'',
'',
'',
'',
''],
name='Re-Run',
dtype='str')
assert_series_equal(msData.sampleMetadata['Re-Run'], reRun)
##
#
##
suplemental = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'',
'2',
'',
'',
'9',
'',
'',
''],
name='Suplemental Injections',
dtype='str')
assert_series_equal(msData.sampleMetadata['Suplemental Injections'], suplemental)
##
#
##
skipped = pandas.Series([False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False],
name='Skipped',
dtype='bool')
assert_series_equal(msData.sampleMetadata['Skipped'], skipped)
##
#
##
matrix = pandas.Series(['P',
'U',
'S',
'P',
'U',
'S',
'P',
'U',
'S',
'',
'',
'',
'',
'',
''],
name='Matrix',
dtype='str')
assert_series_equal(msData.sampleMetadata['Matrix'], matrix)
##
#
##
well = pandas.Series([2,
3,
4,
5,
6,
5,
2,
3,
4,
1,
2,
1,
-1,
-1,
-1],
name='Well',
dtype='int')
assert_series_equal(msData.sampleMetadata['Well'], well, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Well'].dtype.kind, well.dtype.kind)
##
#
##
plate = pandas.Series([1,
2,
3,
4,
5,
4,
1,
2,
3,
1,
2,
3,
1,
2,
21],
name='Plate',
dtype='int')
assert_series_equal(msData.sampleMetadata['Plate'], plate, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Plate'].dtype.kind, well.dtype.kind)
##
#
##
batch = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
2.0,
3.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Batch',
dtype='float')
assert_series_equal(msData.sampleMetadata['Batch'], batch)
##
#
##
dilution = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(msData.sampleMetadata['Dilution'], dilution)
##
#
##
assayRole = pandas.Series([AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.Assay,
AssayRole.Assay],
name='AssayRole',
dtype=object)
assert_series_equal(msData.sampleMetadata['AssayRole'], assayRole)
##
#
##
sampleType = pandas.Series([SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.StudyPool,
SampleType.MethodReference,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.ProceduralBlank,
SampleType.StudyPool,
SampleType.StudyPool],
name='SampleType',
dtype=object)
assert_series_equal(msData.sampleMetadata['SampleType'], sampleType)
def test_updateMasks_features(self):
msData = nPYc.MSDataset('', fileType='empty')
msData.Attributes['artifactualFilter'] = True
##
# Variables:
# Good Corr, Good RSD
# Poor Corr, Good RSD
# Good Corr, Poor RSD
# Poor Corr, Poor RSD
# Good Corr, Good RSD, below blank
##
msData.intensityData = numpy.array([[100, 23, 99, 51, 100],
[90, 54, 91, 88, 91],
[50, 34, 48, 77, 49],
[10, 66, 11, 56, 11],
[1, 12, 2, 81, 2],
[50, 51, 2, 12, 49],
[51, 47, 1, 100, 50],
[47, 50, 70, 21, 48],
[51, 49, 77, 91, 50],
[48, 49, 12, 2, 49],
[50, 48, 81, 2, 51],
[54, 53, 121, 52, 53],
[57, 49, 15, 51, 56],
[140, 41, 97, 47, 137],
[52, 60, 42, 60, 48],
[12, 48, 8, 56, 12],
[1, 2, 1, 1.21, 51],
[2, 1, 1.3, 1.3, 63]],
dtype=float)
msData.sampleMetadata = pandas.DataFrame(data=[[100, 1, 1, 1, AssayRole.LinearityReference, SampleType.StudyPool],
[90, 1, 1, 2, AssayRole.LinearityReference, SampleType.StudyPool],
[50, 1, 1, 3, AssayRole.LinearityReference, SampleType.StudyPool],
[10, 1, 1, 4, AssayRole.LinearityReference, SampleType.StudyPool],
[1, 1, 1, 5, AssayRole.LinearityReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank]],
columns=['Dilution', 'Batch', 'Correction Batch', 'Well', 'AssayRole', 'SampleType'])
msData.featureMetadata = pandas.DataFrame(data=[['Feature_1', 0.5, 100., 0.3],
['Feature_2', 0.55, 100.04, 0.3],
['Feature_3', 0.75, 200., 0.1],
['Feature_4', 0.9, 300., 0.1],
['Feature_5', 0.95, 300.08, 0.1]],
columns=['Feature Name','Retention Time','m/z','Peak Width'])
msData.featureMetadata['Exclusion Details'] = None
msData.featureMetadata['User Excluded'] = False
msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=msData.featureMetadata.index)
msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=msData.featureMetadata.index)
msData.initialiseMasks()
with self.subTest(msg='Default Parameters'):
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax RSD threshold'):
expectedFeatureMask = numpy.array([True, False, True, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=90, varianceRatio=0.1, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax correlation threshold'):
expectedFeatureMask = numpy.array([True, True, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter': True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=1.1, corrThreshold=0))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='High variance ratio'):
expectedFeatureMask = numpy.array([False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=100, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(blankThreshold=0.5))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='No blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':False})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Default withArtifactualFiltering'):
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1],[3,4]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData._tempArtifactualLinkageMatrix)
with self.subTest(msg='Altered withArtifactualFiltering parameters'):
expectedArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True}, **dict(deltaMzArtifactual=300,
overlapThresholdArtifactual=0.1,
corrThresholdArtifactual=0.2))
self.assertEqual(msData.Attributes['filterParameters']['deltaMzArtifactual'], 300)
self.assertEqual(msData.Attributes['filterParameters']['overlapThresholdArtifactual'], 0.1)
self.assertEqual(msData.Attributes['filterParameters']['corrThresholdArtifactual'], 0.2)
assert_frame_equal(expectedArtifactualLinkageMatrix, msData._artifactualLinkageMatrix)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=False'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = False
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': False, 'blankFilter': True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData2.featureMask)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=True'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = True
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0, 1], [3, 4]], columns=['node1', 'node2'])
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData2._tempArtifactualLinkageMatrix)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
msData = nPYc.MSDataset('', fileType='empty')
msData.intensityData = numpy.zeros([18, 5],dtype=float)
msData.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
msData.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
def test_updateMasks_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='Correlation'):
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=-1.01))
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(corrThreshold='0.7'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Blanks'):
self.assertRaises(TypeError, msData.updateMasks, **dict(blankThreshold='A string'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Variance Ratio'):
self.assertRaises(TypeError, msData.updateMasks, **dict(varianceRatio='1.1'))
with self.subTest(msg='ArtifactualParameters'):
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':'A string', 'rsdFilter':False, 'blankFilter': False,
'correlationToDilutionFilter':False, 'varianceRatioFilter':False}, **dict(blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=1.01, blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=-0.01, blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual='0.7', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(deltaMzArtifactual='100', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(overlapThresholdArtifactual='0.5', blankThreshold=False))
def test_applyMasks(self):
fit = numpy.random.randn(self.msData.noSamples, self.msData.noFeatures)
self.msData.fit = copy.deepcopy(fit)
deletedFeatures = numpy.random.randint(0, self.msData.noFeatures, size=2)
self.msData.featureMask[deletedFeatures] = False
fit = numpy.delete(fit, deletedFeatures, 1)
self.msData.applyMasks()
numpy.testing.assert_array_almost_equal(self.msData.fit, fit)
def test_correlationToDilution(self):
from nPYc.utilities._internal import _vcorrcoef
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset', sop='GenericMS')
dataset.sampleMetadata['SampleType'] = nPYc.enumerations.SampleType.StudyPool
dataset.sampleMetadata['AssayRole'] = nPYc.enumerations.AssayRole.LinearityReference
dataset.sampleMetadata['Well'] = 1
dataset.sampleMetadata['Dilution'] = numpy.linspace(1, noSamp, num=noSamp)
correlations = dataset.correlationToDilution
with self.subTest(msg='Checking default path'):
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
with self.subTest(msg='Checking corr exclusions'):
dataset.corrExclusions = None
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
def test_correlateToDilution_raises(self):
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset')
with self.subTest(msg='Unknown correlation type'):
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution, method='unknown')
with self.subTest(msg='No LR samples'):
dataset.sampleMetadata['AssayRole'] = AssayRole.Assay
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution)
with self.subTest(msg='No Dilution field'):
dataset.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
self.assertRaises(KeyError, dataset._MSDataset__correlateToDilution)
def test_validateObject(self):
with self.subTest(msg='validateObject successful on correct dataset'):
goodDataset = copy.deepcopy(self.msData)
self.assertEqual(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True), {'Dataset': True, 'BasicMSDataset':True ,'QC':True, 'sampleMetadata':True})
with self.subTest(msg='BasicMSDataset fails on empty MSDataset'):
badDataset = nPYc.MSDataset('', fileType='empty')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset':False ,'QC':False, 'sampleMetadata':False})
with self.subTest(msg='check raise no warnings with raiseWarning=False'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 0)
with self.subTest(msg='check fail and raise warnings on bad Dataset'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': False, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 5)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.featureMetadata'" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to Dataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not conform to basic MSDataset" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have QC parameters" in str(w[3].message)
assert issubclass(w[4].category, UserWarning)
assert "Does not have sample metadata information" in str(w[4].message)
with self.subTest(msg='check raise warnings BasicMSDataset'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 4)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.Attributes['rtWindow']" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to basic MSDataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have QC parameters" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have sample metadata information" in str(w[3].message)
with self.subTest(msg='check raise warnings QC parameters'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 3)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata['Batch']' is <class 'str'>" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have QC parameters:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[2].message)
with self.subTest(msg='check raise warnings sampleMetadata'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 2)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata' lacks a 'Subject ID' column" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[1].message)
with self.subTest(msg='self.Attributes[\'rtWindow\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rtWindow\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rtWindow'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'msPrecision\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['msPrecision']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'msPrecision\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['msPrecision'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'varianceRatio\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['varianceRatio']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'varianceRatio\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['varianceRatio'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'blankThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['blankThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'blankThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['blankThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrMethod\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrMethod']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrMethod\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrMethod'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'rsdThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rsdThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rsdThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rsdThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'deltaMzArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['deltaMzArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'deltaMzArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['deltaMzArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'overlapThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['overlapThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'overlapThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['overlapThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'FeatureExtractionSoftware\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['FeatureExtractionSoftware']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'FeatureExtractionSoftware\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['FeatureExtractionSoftware'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Raw Data Path\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Raw Data Path']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Raw Data Path\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Raw Data Path'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Feature Names\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Feature Names']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Feature Names\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Feature Names'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.VariableType is not an enum VariableType'):
badDataset = copy.deepcopy(self.msData)
badDataset.VariableType = 'not an enum'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.corrExclusions does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'corrExclusions')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._correlationToDilution does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_correlationToDilution')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._correlationToDilution is not a numpy.ndarray'):
badDataset = copy.deepcopy(self.msData)
badDataset._correlationToDilution = 'not a numpy.ndarray'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._artifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_artifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._artifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._artifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._tempArtifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_tempArtifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._tempArtifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._tempArtifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.fileName does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'fileName')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.fileName is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.fileName = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.filePath does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'filePath')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.filePath is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.filePath = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample File Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample File Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'AssayRole\'] is not an enum \'AssayRole\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['AssayRole'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'SampleType\'] is not an enum \'SampleType\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['SampleType'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Dilution\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Dilution'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Correction Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Correction Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Run Order\'] is not an int'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Run Order'] = 'not an int'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Acquired Time\'] is not a datetime'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Acquired Time'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample Base Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample Base Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Matrix column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Matrix'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Matrix\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Matrix'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Subject ID column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Subject ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Subject ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have the same number of features as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not unique'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = ['Feature1','Feature1','Feature1']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a m/z column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['m/z'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'m/z\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['m/z'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a Retention Time column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['Retention Time'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Retention Time\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Retention Time'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask does not have the same number of features as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
class test_msdataset_batch_inference(unittest.TestCase):
"""
Check batches are generated and amended correctly
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata['Sample File Name'] = ['Test_RPOS_ToF04_B1S1_SR',
'Test_RPOS_ToF04_B1S2_SR',
'Test_RPOS_ToF04_B1S3_SR',
'Test_RPOS_ToF04_B1S4_SR',
'Test_RPOS_ToF04_B1S5_SR',
'Test_RPOS_ToF04_P1W01',
'Test_RPOS_ToF04_P1W02_SR',
'Test_RPOS_ToF04_P1W03',
'Test_RPOS_ToF04_B1E1_SR',
'Test_RPOS_ToF04_B1E2_SR',
'Test_RPOS_ToF04_B1E3_SR',
'Test_RPOS_ToF04_B1E4_SR',
'Test_RPOS_ToF04_B1E5_SR',
'Test_RPOS_ToF04_B2S1_SR',
'Test_RPOS_ToF04_B2S2_SR',
'Test_RPOS_ToF04_B2S3_SR',
'Test_RPOS_ToF04_B2S4_SR',
'Test_RPOS_ToF04_B2S5_SR',
'Test_RPOS_ToF04_P2W01',
'Test_RPOS_ToF04_P2W02_SR',
'Test_RPOS_ToF04_P3W03',
'Test_RPOS_ToF04_B2S1_SR_2',
'Test_RPOS_ToF04_B2S2_SR_2',
'Test_RPOS_ToF04_B2S3_SR_2',
'Test_RPOS_ToF04_B2S4_SR_2',
'Test_RPOS_ToF04_B2S5_SR_2',
'Test_RPOS_ToF04_P3W03_b',
'Test_RPOS_ToF04_B2E1_SR',
'Test_RPOS_ToF04_B2E2_SR',
'Test_RPOS_ToF04_B2E3_SR',
'Test_RPOS_ToF04_B2E4_SR',
'Test_RPOS_ToF04_B2E5_SR',
'Test_RPOS_ToF04_B2SRD1']
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData.sampleMetadata['Run Order'] = self.msData.sampleMetadata.index + 1
def test_fillbatches_correctionbatch(self):
self.msData._fillBatches()
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_fillbatches_warns(self):
self.msData.sampleMetadata.drop('Run Order', axis=1, inplace=True)
self.assertWarnsRegex(UserWarning, r'Unable to infer batches without run order, skipping\.', self.msData._fillBatches)
def test_amendbatches(self):
"""
"""
self.msData._fillBatches()
self.msData.amendBatches(20)
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_msdataset_addsampleinfo_batches(self):
self.msData.addSampleInfo(descriptionFormat='Batches')
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
class test_msdataset_import_undefined(unittest.TestCase):
"""
Test that we raise an error when passing a fileType we don't understand.
"""
def test_raise_notimplemented(self):
self.assertRaises(NotImplementedError, nPYc.MSDataset, os.path.join('nopath'), fileType='Unknown filetype')
class test_msdataset_import_QI(unittest.TestCase):
"""
Test import from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking Peak Widths'):
peakWidth = pandas.Series([0.03931667,
0.01403333,
0.01683333,
0.01683333],
name='Peak Width',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Peak Width'], peakWidth)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
import unittest
from hft.backtesting.backtest import Backtest, BacktestOnSample
from hft.backtesting.strategy import CalmStrategy
from hft.backtesting.readers import OrderbookReader
from hft.environment import sampler
import shutil
import pandas as pd
class SamplerTest(unittest.TestCase):
def test_time(self):
dest_folder = 'time_sampled'
samplerr = sampler.TimeSampler('resources/orderbook/orderbooks.csv.gz',
'resources/orderbook/trades.csv.gz',
dest_folder, 120, nrows=45000)
samplerr.split_samples()
df1 = pd.read_csv(f'{dest_folder}/orderbook_0.csv.gz', header=None)
df2 = pd.read_csv(f'{dest_folder}/trade_0.csv.gz', header=None)
ts1 = pd.to_datetime(df1[0]) # get timestamp column for orderbooks
ts2 = pd.to_datetime(df2[1]) # get timestamp column for trades
delta1 = abs(ts1[0] - ts2[0]).total_seconds()
delta2 = abs(ts1[len(ts1) - 1] - ts2[len(ts2) - 1]).total_seconds()
self.assertTrue(delta1 <= 2.) # shift
self.assertTrue(delta2 <= 0.3) # no shift
df1 = pd.read_csv(f'{dest_folder}/orderbook_1.csv.gz', header=None)
df2 = pd.read_csv(f'{dest_folder}/trade_1.csv.gz', header=None)
from __future__ import annotations
from typing import List, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
from pandas.util._decorators import Appender, Substitution
from scipy import stats
from statsmodels.iolib.summary import Summary, fmt_2cols, fmt_params
from statsmodels.iolib.table import SimpleTable
from statsmodels.regression.linear_model import OLS, RegressionResults
import arch.covariance.kernel as lrcov
from arch.typing import ArrayLike1D, ArrayLike2D, Float64Array, Literal, UnitRootTrend
from arch.unitroot._engle_granger import EngleGrangerTestResults, engle_granger
from arch.unitroot._phillips_ouliaris import (
CriticalValueWarning,
PhillipsOuliarisTestResults,
phillips_ouliaris,
)
from arch.unitroot._shared import (
KERNEL_ERR,
KERNEL_ESTIMATORS,
_check_cointegrating_regression,
_check_kernel,
_cross_section,
)
from arch.unitroot.unitroot import SHORT_TREND_DESCRIPTION
from arch.utility.array import ensure2d
from arch.utility.io import pval_format, str_format
from arch.utility.timeseries import add_trend
from arch.vendor import cached_property
__all__ = [
"engle_granger",
"EngleGrangerTestResults",
"DynamicOLS",
"DynamicOLSResults",
"phillips_ouliaris",
"PhillipsOuliarisTestResults",
"CriticalValueWarning",
]
class _CommonCointegrationResults(object):
def __init__(
self,
params: pd.Series,
cov: pd.DataFrame,
resid: pd.Series,
kernel_est: lrcov.CovarianceEstimator,
num_x: int,
trend: UnitRootTrend,
df_adjust: bool,
r2: float,
adj_r2: float,
estimator_type: str,
):
self._params = params
self._cov = cov
self._resid = resid
self._bandwidth = kernel_est.bandwidth
self._kernel = kernel_est.__class__.__name__
self._kernel_est = kernel_est
self._num_x = num_x
self._trend = trend
self._df_adjust = df_adjust
self._ci_size = params.shape[0]
self._rsquared = r2
self._rsquared_adj = adj_r2
self._estimator_type = estimator_type
@property
def params(self) -> pd.Series:
"""The estimated parameters of the cointegrating vector"""
return self._params.iloc[: self._ci_size]
@cached_property
def std_errors(self) -> pd.Series:
"""
Standard errors of the parameters in the cointegrating vector
"""
se = np.sqrt(np.diag(self.cov))
return pd.Series(se, index=self.params.index, name="std_errors")
@cached_property
def tvalues(self) -> pd.Series:
"""
T-statistics of the parameters in the cointegrating vector
"""
return pd.Series(self.params / self.std_errors, name="tvalues")
@cached_property
def pvalues(self) -> pd.Series:
"""
P-value of the parameters in the cointegrating vector
"""
return pd.Series(2 * (1 - stats.norm.cdf(np.abs(self.tvalues))), name="pvalues")
@property
def cov(self) -> pd.DataFrame:
"""The estimated parameter covariance of the cointegrating vector"""
return self._cov.iloc[: self._ci_size, : self._ci_size]
@property
def resid(self) -> pd.Series:
"""The model residuals"""
return self._resid
@property
def kernel(self) -> str:
"""The kernel used to estimate the covariance"""
return self._kernel
@property
def bandwidth(self) -> float:
"""The bandwidth used in the parameter covariance estimation"""
return self._bandwidth
@property
def rsquared(self) -> float:
"""The model R²"""
return self._rsquared
@property
def rsquared_adj(self) -> float:
"""The degree-of-freedom adjusted R²"""
return self._rsquared_adj
@cached_property
def _cov_est(self) -> lrcov.CovarianceEstimate:
r = np.asarray(self._resid)
kern_class = self._kernel_est.__class__
bw = self._bandwidth
force_int = self._kernel_est.force_int
cov_est = kern_class(r, bandwidth=bw, center=False, force_int=force_int)
return cov_est.cov
@property
def _df_scale(self) -> float:
if not self._df_adjust:
return 1.0
nobs = self._resid.shape[0]
nvar = self.params.shape[0]
return nobs / (nobs - nvar)
@property
def residual_variance(self) -> float:
r"""
The variance of the regression residual.
Returns
-------
float
The estimated residual variance.
Notes
-----
The residual variance only accounts for the short-run variance of the
residual and does not account for any autocorrelation. It is defined
as
.. math::
\hat{\sigma}^2 = T^{-1} \sum _{t=p}^{T-q} \hat{\epsilon}_t^2
If `df_adjust` is True, then the estimator is rescaled by T/(T-m) where
m is the number of regressors in the model.
"""
return self._df_scale * self._cov_est.short_run[0, 0]
@property
def long_run_variance(self) -> float:
"""
The long-run variance of the regression residual.
Returns
-------
float
The estimated long-run variance of the residual.
Notes
-----
The long-run variance is estimated from the model residuals
using the same kernel used to estimate the parameter
covariance.
If `df_adjust` is True, then the estimator is rescaled by T/(T-m) where
m is the number of regressors in the model.
"""
return self._df_scale * self._cov_est.long_run[0, 0]
@staticmethod
def _top_table(
top_left: Sequence[Tuple[str, str]],
top_right: Sequence[Tuple[str, str]],
title: str,
) -> SimpleTable:
stubs = []
vals = []
for stub, val in top_left:
stubs.append(stub)
vals.append([val])
table = SimpleTable(vals, txt_fmt=fmt_2cols, title=title, stubs=stubs)
fmt = fmt_2cols.copy()
fmt["data_fmts"][1] = "%18s"
top_right = [("%-21s" % (" " + k), v) for k, v in top_right]
stubs = []
vals = []
for stub, val in top_right:
stubs.append(stub)
vals.append([val])
table.extend_right(SimpleTable(vals, stubs=stubs))
return table
def _top_right(self) -> List[Tuple[str, str]]:
top_right = [
("No. Observations:", str(self._resid.shape[0])),
("R²:", str_format(self.rsquared)),
("Adjusted. R²:", str_format(self.rsquared_adj)),
("Residual Variance:", str_format(self.residual_variance)),
("Long-run Variance:", str_format(self.long_run_variance)),
("", ""),
]
return top_right
@staticmethod
def _param_table(
params: Float64Array,
se: Float64Array,
tstats: Float64Array,
pvalues: Float64Array,
stubs: Sequence[str],
title: str,
) -> SimpleTable:
ci = params[:, None] + se[:, None] * stats.norm.ppf([[0.025, 0.975]])
param_data = np.column_stack([params, se, tstats, pvalues, ci])
data = []
for row in param_data:
txt_row = []
for i, v in enumerate(row):
f = str_format
if i == 3:
f = pval_format
txt_row.append(f(v))
data.append(txt_row)
header = ["Parameter", "Std. Err.", "T-stat", "P-value", "Lower CI", "Upper CI"]
table = SimpleTable(
data, stubs=stubs, txt_fmt=fmt_params, headers=header, title=title
)
return table
def summary(self) -> Summary:
"""
Summary of the model, containing estimated parameters and std. errors
Returns
-------
Summary
A summary instance with method that support export to text, csv
or latex.
"""
if self._bandwidth != int(self._bandwidth):
bw = str_format(self._bandwidth)
else:
bw = str(int(self._bandwidth))
top_left = [
("Trend:", SHORT_TREND_DESCRIPTION[self._trend]),
("Kernel:", str(self._kernel)),
("Bandwidth:", bw),
("", ""),
("", ""),
("", ""),
]
top_right = self._top_right()
smry = Summary()
title = self._estimator_type
table = self._top_table(top_left, top_right, title)
# Top Table
# Parameter table
smry.tables.append(table)
params = np.asarray(self.params)
stubs = list(self.params.index)
se = np.asarray(self.std_errors)
tstats = np.asarray(self.tvalues)
pvalues = np.asarray(self.pvalues)
title = "Cointegrating Vector"
table = self._param_table(params, se, tstats, pvalues, stubs, title)
smry.tables.append(table)
return smry
class DynamicOLSResults(_CommonCointegrationResults):
"""
Estimation results for Dynamic OLS models
Parameters
----------
params : Series
The estimated model parameters.
cov : DataFrame
The estimated parameter covariance.
resid : Series
The model residuals.
lags : int
The number of lags included in the model.
leads : int
The number of leads included in the model.
cov_type : str
The type of the parameter covariance estimator used.
kernel_est : CovarianceEstimator
The covariance estimator instance used to estimate the parameter
covariance.
reg_results : RegressionResults
Regression results from fitting statsmodels OLS.
df_adjust : bool
Whether to degree of freedom adjust the estimator.
"""
def __init__(
self,
params: pd.Series,
cov: pd.DataFrame,
resid: pd.Series,
lags: int,
leads: int,
cov_type: str,
kernel_est: lrcov.CovarianceEstimator,
num_x: int,
trend: UnitRootTrend,
reg_results: RegressionResults,
df_adjust: bool,
) -> None:
super().__init__(
params,
cov,
resid,
kernel_est,
num_x,
trend,
df_adjust,
r2=reg_results.rsquared,
adj_r2=reg_results.rsquared_adj,
estimator_type="Dynamic OLS",
)
self._leads = leads
self._lags = lags
self._cov_type = cov_type
self._ci_size = params.shape[0] - self._num_x * (leads + lags + 1)
@property
def full_params(self) -> pd.Series:
"""The complete set of parameters, including leads and lags"""
return self._params
@property
def full_cov(self) -> pd.DataFrame:
"""
Parameter covariance of all model parameters, including leads and lags
"""
return self._cov
@property
def lags(self) -> int:
"""The number of lags included in the model"""
return self._lags
@property
def leads(self) -> int:
"""The number of leads included in the model"""
return self._leads
@property
def cov_type(self) -> str:
"""The type of parameter covariance estimator used"""
return self._cov_type
@property
def _df_scale(self) -> float:
if not self._df_adjust:
return 1.0
nobs = self._resid.shape[0]
nvar = self.full_params.shape[0]
return nobs / (nobs - nvar)
def summary(self, full: bool = False) -> Summary:
"""
Summary of the model, containing estimated parameters and std. errors
Parameters
----------
full : bool, default False
Flag indicating whether to include all estimated parameters
(True) or only the parameters of the cointegrating vector
Returns
-------
Summary
A summary instance with method that support export to text, csv
or latex.
"""
if self._bandwidth != int(self._bandwidth):
bw = str_format(self._bandwidth)
else:
bw = str(int(self._bandwidth))
top_left = [
("Trend:", SHORT_TREND_DESCRIPTION[self._trend]),
("Leads:", str(self._leads)),
("Lags:", str(self._lags)),
("Cov Type:", str(self._cov_type)),
("Kernel:", str(self._kernel)),
("Bandwidth:", bw),
]
top_right = self._top_right()
smry = Summary()
typ = "Cointegrating Vector" if not full else "Model"
title = f"Dynamic OLS {typ} Summary"
table = self._top_table(top_left, top_right, title)
# Top Table
# Parameter table
smry.tables.append(table)
if full:
params = np.asarray(self.full_params)
stubs = list(self.full_params.index)
se = np.sqrt(np.diag(self.full_cov))
tstats = params / se
pvalues = 2 * (1 - stats.norm.cdf(np.abs(tstats)))
else:
params = np.asarray(self.params)
stubs = list(self.params.index)
se = np.asarray(self.std_errors)
tstats = np.asarray(self.tvalues)
pvalues = np.asarray(self.pvalues)
title = "Cointegrating Vector" if not full else "Model Parameters"
assert isinstance(se, np.ndarray)
table = self._param_table(params, se, tstats, pvalues, stubs, title)
smry.tables.append(table)
return smry
class DynamicOLS(object):
r"""
Dynamic OLS (DOLS) cointegrating vector estimation
Parameters
----------
y : array_like
The left-hand-side variable in the cointegrating regression.
x : array_like
The right-hand-side variables in the cointegrating regression.
trend : {"n","c","ct","ctt"}, default "c"
Trend to include in the cointegrating regression. Trends are:
* "n": No deterministic terms
* "c": Constant
* "ct": Constant and linear trend
* "ctt": Constant, linear and quadratic trends
lags : int, default None
The number of lags to include in the model. If None, the optimal
number of lags is chosen using the information criterion in ``method``.
leads : int, default None
The number of leads to include in the model. If None, the optimal
number of leads is chosen using the information criterion in ``method``.
common : bool, default False
Flag indicating that lags and leads should be restricted to the same
value. When common is True, lags must equal leads and max_lag must
equal max_lead.
max_lag : int, default None
The maximum lag to consider. See Notes for value used when None.
max_lead : int, default None
The maximum lead to consider. See Notes for value used when None.
method : {"aic","bic","hqic"}, default "bic"
The method used to select lag length when lags or leads is None.
* "aic" - Akaike Information Criterion
* "hqic" - Hannan-Quinn Information Criterion
* "bic" - Schwartz/Bayesian Information Criterion
Notes
-----
The cointegrating vector is estimated from the regression
.. math ::
Y_t = D_t \delta + X_t \beta + \Delta X_{t} \gamma
+ \sum_{i=1}^p \Delta X_{t-i} \kappa_i
+ \sum _{j=1}^q \Delta X_{t+j} \lambda_j + \epsilon_t
where p is the lag length and q is the lead length. :math:`D_t` is a
vector containing the deterministic terms, if any. All specifications
include the contemporaneous difference :math:`\Delta X_{t}`.
When lag lengths are not provided, the optimal lag length is chosen to
minimize an Information Criterion of the form
.. math::
\ln\left(\hat{\sigma}^2\right) + k\frac{c}{T}
where c is 2 for Akaike, :math:`2\ln\ln T` for Hannan-Quinn and
:math:`\ln T` for Schwarz/Bayesian.
See [1]_ and [2]_ for further details.
References
----------
.. [1] Saikkonen, P. (1992). Estimation and testing of cointegrated
systems by an autoregressive approximation. Econometric Theory,
8(1), 1-27.
.. [2] Stock, J. H., & Watson, M. W. (1993). A simple estimator of
cointegrating vectors in higher order integrated systems.
Econometrica: Journal of the Econometric Society, 783-820.
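Examples
--------
A minimal illustrative sketch on simulated data, assuming this module is
importable as ``arch.unitroot.cointegration``; the seed, series names and
sample size below are arbitrary choices for the example, not part of the
library:

>>> import numpy as np
>>> import pandas as pd
>>> from arch.unitroot.cointegration import DynamicOLS
>>> rs = np.random.RandomState(0)
>>> x = pd.DataFrame(rs.standard_normal((500, 1)).cumsum(axis=0), columns=["x"])
>>> y = pd.Series(x["x"] + rs.standard_normal(500), name="y")
>>> res = DynamicOLS(y, x, trend="c").fit()
>>> params = res.params  # cointegrating vector (constant and coefficient on x)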
"""
def __init__(
self,
y: ArrayLike1D,
x: ArrayLike2D,
trend: UnitRootTrend = "c",
lags: Optional[int] = None,
leads: Optional[int] = None,
common: bool = False,
max_lag: Optional[int] = None,
max_lead: Optional[int] = None,
method: Literal["aic", "bic", "hqic"] = "bic",
) -> None:
setup = _check_cointegrating_regression(y, x, trend)
self._y = setup.y
self._x = setup.x
self._trend = setup.trend
self._lags = lags
self._leads = leads
self._max_lag = max_lag
self._max_lead = max_lead
self._method = method
self._common = bool(common)
self._y_df = pd.DataFrame(self._y)
self._check_inputs()
def _check_inputs(self) -> None:
"""Validate the inputs"""
if not isinstance(self._method, str) or self._method.lower() not in (
"aic",
"bic",
"hqic",
):
raise ValueError('method must be one of "aic", "bic", or "hqic"')
max_lag = self._max_lag
self._max_lag = int(max_lag) if max_lag is not None else max_lag
max_lead = self._max_lead
self._max_lead = int(max_lead) if max_lead is not None else max_lead
self._leads = int(self._leads) if self._leads is not None else self._leads
self._lags = int(self._lags) if self._lags is not None else self._lags
if self._common and self._leads != self._lags:
raise ValueError(
"common is specified but leads and lags have different values"
)
if self._common and self._max_lead != self._max_lag:
raise ValueError(
"common is specified but max_lead and max_lag have different values"
)
max_ll = self._max_lead_lag()
obs_remaining = self._y.shape[0] - 1
obs_remaining -= max_ll if max_lag is None else max_lag
obs_remaining -= max_ll if max_lead is None else max_lead
if obs_remaining <= 0:
raise ValueError(
"max_lag and max_lead are too large for the amount of "
"data. The largest model specification in the search "
"cannot be estimated."
)
def _format_variables(
self, leads: int, lags: int
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Format the variables for the regression"""
x = self._x
y = self._y_df
delta_x = x.diff()
data = [y, x]
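# Collect differenced regressors shifted from -lags to +leads: negative
# shifts are labelled "LAG", the contemporaneous difference keeps the plain
# "D." prefix, and positive shifts are labelled "LEAD".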
for lag in range(-lags, leads + 1):
lag_data = delta_x.shift(-lag)
typ = "LAG" if lag < 0 else "LEAD"
lag_data.columns = [f"D.{c}.{typ}{abs(lag)}" for c in lag_data.columns]
if lag == 0:
lag_data.columns = [f"D.{c}" for c in lag_data.columns]
data.append(lag_data)
data_df: pd.DataFrame = pd.concat(data, axis=1).dropna()
lhs, rhs = data_df.iloc[:, :1], data_df.iloc[:, 1:]
nrhs = rhs.shape[1]
rhs = add_trend(rhs, trend=self._trend, prepend=True)
ntrend = rhs.shape[1] - nrhs
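# Reorder columns so the regressors appear as [x, deterministic terms, lead/lag differences]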
if ntrend:
nx = x.shape[1]
trend = rhs.iloc[:, :ntrend]
rhs = pd.concat(
[rhs.iloc[:, ntrend : ntrend + nx], trend, rhs.iloc[:, ntrend + nx :]],
axis=1,
)
return lhs, rhs
def _ic(self, resids: Float64Array, nparam: int) -> float:
"""Compute an info criterion"""
nobs = resids.shape[0]
sigma2 = float(resids.T @ resids / nobs)
if self._method == "aic":
penalty = 2.0
elif self._method == "hqic":
penalty = 2.0 * float(np.log(np.log(nobs)))
else: # bic
penalty = float(np.log(nobs))
return np.log(sigma2) + nparam * penalty / nobs
def _max_lead_lag(self) -> int:
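"""Maximum lead/lag used in the search: ceil(12 * (nobs / 100) ** (1 / 4))"""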
nobs = self._y.shape[0]
return int(np.ceil(12.0 * (nobs / 100) ** (1 / 4)))
def _leads_and_lags(self) -> Tuple[int, int]:
"""Select the optimal number of leads and lags"""
if self._lags is not None and self._leads is not None:
return self._leads, self._lags
nobs = self._y.shape[0]
max_lead_lag = int(np.ceil(12.0 * (nobs / 100) ** (1 / 4)))
if self._lags is None:
max_lag = max_lead_lag if self._max_lag is None else self._max_lag
min_lag = 0
else:
min_lag = max_lag = self._lags
if self._leads is None:
max_lead = max_lead_lag if self._max_lead is None else self._max_lead
min_lead = 0
else:
min_lead = max_lead = self._leads
variables = self._format_variables(max_lead, max_lag)
lhs = np.asarray(variables[0])
rhs = np.asarray(variables[1])
nx = self._x.shape[1]
# +1 to account for the Delta X(t) (not a lead or a lag)
lead_lag_offset = rhs.shape[1] - (max_lead + max_lag + 1) * nx
always_loc = np.arange(lead_lag_offset)
best_ic = np.inf
best_leads_and_lags = (0, 0)
for lag in range(min_lag, max_lag + 1):
for lead in range(min_lead, max_lead + 1):
if self._common and lag != lead:
continue
lag_start = max_lag - lag
# +1 to get LAG0 in all regressions
lead_end = max_lag + 1 + lead
lead_lag_locs = np.arange(lag_start * nx, lead_end * nx)
lead_lag_locs += lead_lag_offset
locs = np.r_[always_loc, lead_lag_locs]
_rhs = rhs[:, locs]
params = np.linalg.lstsq(_rhs, lhs, rcond=None)[0]
resid = np.squeeze(lhs - _rhs @ params)
ic = self._ic(resid, params.shape[0])
if ic < best_ic:
best_ic = ic
best_leads_and_lags = (lead, lag)
return best_leads_and_lags
def fit(
self,
cov_type: Literal[
"unadjusted", "homoskedastic", "robust", "kernel"
] = "unadjusted",
kernel: str = "bartlett",
bandwidth: Optional[int] = None,
force_int: bool = False,
df_adjust: bool = False,
) -> DynamicOLSResults:
r"""
Estimate the Dynamic OLS regression
Parameters
----------
cov_type : str, default "unadjusted"
Either "unadjusted" (or is equivalent "homoskedastic") or "robust"
(or its equivalent "kernel").
kernel : str, default "bartlett"
The string name of any known kernel-based long-run
covariance estimators. Common choices are "bartlett" for the
Bartlett kernel (Newey-West), "parzen" for the Parzen kernel
and "quadratic-spectral" for the Quadratic Spectral kernel.
bandwidth : int, default None
The bandwidth to use. If not provided, the optimal bandwidth is
estimated from the data. Setting the bandwidth to 0 and using
"unadjusted" produces the classic OLS covariance estimator.
Setting the bandwidth to 0 and using "robust" produces White's
covariance estimator.
force_int : bool, default False
Whether to force the estimated optimal bandwidth to be an integer.
df_adjust : bool, default False
Whether to adjust the parameter covariance to account for the
number of parameters estimated in the regression. If true, the
parameter covariance estimator is multiplied by T/(T-k) where
k is the number of regressors in the model.
Returns
-------
DynamicOLSResults
The estimation results.
See Also
--------
arch.unitroot.cointegration.engle_granger
Cointegration testing using the Engle-Granger methodology
statsmodels.regression.linear_model.OLS
Ordinary Least Squares regression.
Notes
-----
When using the unadjusted covariance, the parameter covariance is
estimated as
.. math::
T^{-1} \hat{\sigma}^2_{HAC} \hat{\Sigma}_{ZZ}^{-1}
where :math:`\hat{\sigma}^2_{HAC}` is an estimator of the long-run
variance of the regression error and
:math:`\hat{\Sigma}_{ZZ}=T^{-1}Z'Z`. :math:`Z_t` is a vector that
includes all terms in the regression (i.e., deterministics,
cross-sectional variables, leads and lags). When using the robust covariance,
the parameter covariance is estimated as
.. math::
T^{-1} \hat{\Sigma}_{ZZ}^{-1} \hat{S}_{HAC} \hat{\Sigma}_{ZZ}^{-1}
where :math:`\hat{S}_{HAC}` is a Heteroskedasticity-Autocorrelation
Consistent estimator of the covariance of the regression scores
:math:`Z_t\epsilon_t`.
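Examples
--------
Continuing the class-level example, a kernel (HAC) parameter covariance
can be requested as, e.g.,
>>> res = DynamicOLS(y, x).fit(cov_type="robust", kernel="parzen")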
"""
leads, lags = self._leads_and_lags()
# TODO: Rank check and drop??
lhs, rhs = self._format_variables(leads, lags)
mod = OLS(lhs, rhs)
res = mod.fit()
coeffs = np.asarray(res.params)
resid = lhs.squeeze() - (rhs @ coeffs).squeeze()
resid.name = "resid"
cov, est = self._cov(
cov_type, kernel, bandwidth, force_int, df_adjust, rhs, resid
)
params = pd.Series(np.squeeze(coeffs), index=rhs.columns, name="params")
num_x = self._x.shape[1]
return DynamicOLSResults(
params,
cov,
resid,
lags,
leads,
cov_type,
est,
num_x,
self._trend,
res,
df_adjust,
)
@staticmethod
def _cov(
cov_type: Literal["unadjusted", "homoskedastic", "robust", "kernel"],
kernel: str,
bandwidth: Optional[int],
force_int: bool,
df_adjust: bool,
rhs: pd.DataFrame,
resids: pd.Series,
) -> Tuple[pd.DataFrame, lrcov.CovarianceEstimator]:
"""Estimate the covariance"""
kernel = kernel.lower().replace("-", "").replace("_", "")
if kernel not in KERNEL_ESTIMATORS:
raise ValueError(KERNEL_ERR)
x = np.asarray(rhs)
eps = ensure2d(np.asarray(resids), "eps")
nobs, nx = x.shape
sigma_xx = x.T @ x / nobs
sigma_xx_inv = np.linalg.inv(sigma_xx)
kernel_est = KERNEL_ESTIMATORS[kernel]
scale = nobs / (nobs - nx) if df_adjust else 1.0
if cov_type in ("unadjusted", "homoskedastic"):
est = kernel_est(eps, bandwidth, center=False, force_int=force_int)
sigma2 = np.squeeze(est.cov.long_run)
cov = (scale * sigma2) * sigma_xx_inv / nobs
elif cov_type in ("robust", "kernel"):
scores = x * eps
est = kernel_est(scores, bandwidth, center=False, force_int=force_int)
s = est.cov.long_run
cov = scale * sigma_xx_inv @ s @ sigma_xx_inv / nobs
else:
raise ValueError("Unknown cov_type")
cov_df = pd.DataFrame(cov, columns=rhs.columns, index=rhs.columns)
return cov_df, est
class CointegrationAnalysisResults(_CommonCointegrationResults):
def __init__(
self,
params: pd.Series,
cov: pd.DataFrame,
resid: pd.Series,
omega_112: float,
kernel_est: lrcov.CovarianceEstimator,
num_x: int,
trend: UnitRootTrend,
df_adjust: bool,
rsquared: float,
rsquared_adj: float,
estimator_type: str,
):
super().__init__(
params,
cov,
resid,
kernel_est,
num_x,
trend,
df_adjust,
rsquared,
rsquared_adj,
estimator_type,
)
self._omega_112 = omega_112
@property
def long_run_variance(self) -> float:
"""
Long-run variance estimate used in the parameter covariance estimator
"""
return self._omega_112
COMMON_DOCSTRING = r"""
%(method)s cointegrating vector estimation.
Parameters
----------
y : array_like
The left-hand-side variable in the cointegrating regression.
x : array_like
The right-hand-side variables in the cointegrating regression.
trend : {{"n","c","ct","ctt"}}, default "c"
Trend to include in the cointegrating regression. Trends are:
* "n": No deterministic terms
* "c": Constant
* "ct": Constant and linear trend
* "ctt": Constant, linear and quadratic trends
x_trend : {None,"c","ct","ctt"}, default None
Trends that affect the x-data but do not appear in the
cointegrating regression. x_trend must be at least as large as
trend, so that if trend is "ct", x_trend must be either "ct" or
"ctt".
Notes
-----
The cointegrating vector is estimated from the regressions
.. math::
Y_t & = D_{1t} \delta + X_t \beta + \eta_{1t} \\
X_t & = D_{1t} \Gamma_1 + D_{2t}\Gamma_2 + \epsilon_{2t} \\
\eta_{2t} & = \Delta \epsilon_{2t}
or if estimated in differences, the last two lines are
.. math::
\Delta X_t = \Delta D_{1t} \Gamma_1 + \Delta D_{2t} \Gamma_2 + \eta_{2t}
Define the vector of residuals as :math:`\eta = (\eta_{1t},\eta'_{2t})'`, and the
long-run covariance
.. math::
\Omega = \sum_{h=-\infty}^{\infty} E[\eta_t\eta_{t-h}']
and the one-sided long-run covariance matrix
.. math::
\Lambda_0 = \sum_{h=0}^\infty E[\eta_t\eta_{t-h}']
The covariance matrices are partitioned into a block form
.. math::
\Omega = \left[\begin{array}{cc}
\omega_{11} & \omega_{12} \\
\omega'_{12} & \Omega_{22}
\end{array} \right]
The cointegrating vector is then estimated using modified data
%(estimator)s
"""
CCR_METHOD = "Canonical Cointegrating Regression"
CCR_ESTIMATOR = r"""
.. math::
X^\star_t & = X_t - \hat{\Lambda}_2'\hat{\Sigma}^{-1}\hat{\eta}_t \\
Y^\star_t & = Y_t - (\hat{\Sigma}^{-1} \hat{\Lambda}_2 \hat{\beta}
+ \hat{\kappa})' \hat{\eta}_t
where :math:`\hat{\kappa} = (0,\hat{\Omega}_{22}^{-1}\hat{\Omega}'_{12})` and
the regression
.. math::
Y^\star_t = D_{1t} \delta + X^\star_t \beta + \eta^\star_{1t}
See [1]_ for further details.
References
----------
.. [1] <NAME>. (1992). Canonical cointegrating regressions. Econometrica:
Journal of the Econometric Society, 119-143.
"""
FMOLS_METHOD = "Fully Modified OLS"
FMOLS_ESTIMATOR = r"""
.. math::
Y^\star_t = Y_t - \hat{\omega}_{12}\hat{\Omega}_{22}^{-1}\hat{\eta}_{2t}
as
.. math::
\hat{\theta} = \left[\begin{array}{c}\hat{\gamma}_1 \\ \hat{\beta} \end{array}\right]
= \left(\sum_{t=2}^T Z_tZ'_t\right)^{-1}
\left(\sum_{t=2}^T Z_t Y^\star_t -
T \left[\begin{array}{c} 0 \\ \lambda^{\star\prime}_{12}
\end{array}\right]\right)
where the bias term is defined
.. math::
\lambda^\star_{12} = \hat{\lambda}_{12}
- \hat{\omega}_{12}\hat{\Omega}_{22}^{-1}\hat{\lambda}_{22}
See [1]_ for further details.
References
----------
.. [1] <NAME>., & <NAME>. (1990). Estimation and inference in models of
cointegration: A simulation study. Advances in Econometrics, 8(1989), 225-248.
"""
@Substitution(method=FMOLS_METHOD, estimator=FMOLS_ESTIMATOR)
@Appender(COMMON_DOCSTRING)
class FullyModifiedOLS(object):
def __init__(
self,
y: ArrayLike1D,
x: ArrayLike2D,
trend: UnitRootTrend = "c",
x_trend: Optional[UnitRootTrend] = None,
) -> None:
setup = _check_cointegrating_regression(y, x, trend)
self._y = setup.y
self._x = setup.x
self._trend = setup.trend
self._x_trend = x_trend
self._y_df = pd.DataFrame(self._y)
def _common_fit(
self, kernel: str, bandwidth: Optional[float], force_int: bool, diff: bool
) -> Tuple[lrcov.CovarianceEstimator, Float64Array, Float64Array]:
kernel = _check_kernel(kernel)
res = _cross_section(self._y, self._x, self._trend)
x = np.asarray(self._x)
eta_1 = np.asarray(res.resid)
if self._x_trend is not None:
x_trend = self._x_trend
else:
x_trend = self._trend
tr = add_trend(nobs=x.shape[0], trend=x_trend)
if tr.shape[1] > 1 and diff:
delta_tr = np.diff(tr[:, 1:], axis=0)
delta_x = np.diff(x, axis=0)
gamma = np.linalg.lstsq(delta_tr, delta_x, rcond=None)[0]
eta_2 = delta_x - delta_tr @ gamma
else:
if tr.shape[1]:
gamma = np.linalg.lstsq(tr, x, rcond=None)[0]
eps = x - tr @ gamma
else:
eps = x
eta_2 = np.diff(eps, axis=0)
eta = np.column_stack([eta_1[1:], eta_2])
kernel = _check_kernel(kernel)
kern_est = KERNEL_ESTIMATORS[kernel]
cov_est = kern_est(eta, bandwidth=bandwidth, center=False, force_int=force_int)
beta = np.asarray(res.params)[: x.shape[1]]
return cov_est, eta, beta
def _final_statistics(self, theta: pd.Series) -> Tuple[pd.Series, float, float]:
z = add_trend(self._x, self._trend)
nobs, nvar = z.shape
resid = self._y - np.asarray(z @ theta)
resid.name = "resid"
center = 0.0
tss_df = 0
if "c" in self._trend:
center = self._y.mean()
tss_df = 1
y_centered = self._y - center
ssr = resid.T @ resid
tss = y_centered.T @ y_centered
r2 = 1.0 - ssr / tss
r2_adj = 1.0 - (ssr / (nobs - nvar)) / (tss / (nobs - tss_df))
return resid, r2, r2_adj
def fit(
self,
kernel: str = "bartlett",
bandwidth: Optional[float] = None,
force_int: bool = True,
diff: bool = False,
df_adjust: bool = False,
) -> CointegrationAnalysisResults:
"""
Estimate the cointegrating vector.
Parameters
----------
diff : bool, default False
Use differenced data to estimate the residuals.
kernel : str, default "bartlett"
The string name of any known kernel-based long-run
covariance estimators. Common choices are "bartlett" for the
Bartlett kernel (Newey-West), "parzen" for the Parzen kernel
and "quadratic-spectral" for the Quadratic Spectral kernel.
bandwidth : int, default None
The bandwidth to use. If not provided, the optimal bandwidth is
estimated from the data.
force_int : bool, default True
Whether to force the estimated optimal bandwidth to be an integer.
df_adjust : bool, default False
Whether to adjust the parameter covariance to account for the
number of parameters estimated in the regression. If true, the
parameter covariance estimator is multiplied by T/(T-k) where
k is the number of regressors in the model.
Returns
-------
CointegrationAnalysisResults
The estimation results instance.
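Examples
--------
A minimal sketch with simulated data (illustrative only):
>>> import numpy as np
>>> rs = np.random.RandomState(0)
>>> x = rs.standard_normal((500, 2)).cumsum(axis=0)
>>> y = x @ np.ones(2) + rs.standard_normal(500)
>>> res = FullyModifiedOLS(y, x).fit(kernel="parzen")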
"""
cov_est, eta, _ = self._common_fit(kernel, bandwidth, force_int, diff)
omega = np.asarray(cov_est.cov.long_run)
lmbda = np.asarray(cov_est.cov.one_sided)
omega_12 = omega[:1, 1:]
omega_22 = omega[1:, 1:]
omega_22_inv = np.linalg.inv(omega_22)
eta_2 = eta[:, 1:]
y, x = np.asarray(self._y_df), np.asarray(self._x)
y_dot = y[1:] - eta_2 @ omega_22_inv @ omega_12.T
lmbda_12 = lmbda[:1, 1:]
lmbda_22 = lmbda[1:, 1:]
lmbda_12_dot = lmbda_12 - omega_12 @ omega_22_inv @ lmbda_22
z_df = add_trend(self._x, trend=self._trend)
z_df = z_df.iloc[1:]
z = np.asarray(z_df)
zpz = z.T @ z
nobs, nvar = z.shape
bias = np.zeros((nvar, 1))
kx = x.shape[1]
bias[:kx] = lmbda_12_dot.T
zpydot = z.T @ y_dot - nobs * bias
params = np.squeeze(np.linalg.solve(zpz, zpydot))
omega_11 = omega[:1, :1]
scale = 1.0 if not df_adjust else nobs / (nobs - nvar)
omega_112 = scale * (omega_11 - omega_12 @ omega_22_inv @ omega_12.T)
zpz_inv = np.linalg.inv(zpz)
param_cov = omega_112 * zpz_inv
cols = z_df.columns
params = pd.Series(params.squeeze(), index=cols, name="params")
param_cov = pd.DataFrame(param_cov, columns=cols, index=cols)
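# The remainder of fit() is reconstructed here as a hedged sketch: compute the residual-based
# statistics defined in _final_statistics and bundle everything into the results class. The
# estimator label string is an assumption and not taken from the original source.
resid, rsquared, rsquared_adj = self._final_statistics(params)
return CointegrationAnalysisResults(
params, param_cov, resid, float(np.squeeze(omega_112)), cov_est, kx,
self._trend, df_adjust, rsquared, rsquared_adj, "Fully Modified OLS (FM-OLS)",
)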
from IPython.display import display
import pandas
from Datascrap import I_date,I_frequency,end,start,I_wordtocount,I_sentpolarity,I_sentsubjectivity,I_score,I_type
# --------------------------------------------------------------------------------#
print("Total Posts,Comments & Replies = " + str(len(I_date)) + "\n")
print("There are - " + str(sum(I_frequency)) + " mentions of " + "| " + I_wordtocount + " |" + "\n")
print("Time taken to run =" + str(end - start) + "\n")
# --------------------------------------------------------------#
# Average polarity calculations(Overall)
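# (values of exactly 0 are excluded from the averages below on the assumption that a polarity
# of 0 means no sentiment was detected for that post/comment)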
actualvaluespol = (len(I_sentpolarity) - (I_sentpolarity.count(0)))
sumpolarity = sum(I_sentpolarity)
avgpolarity = sumpolarity / actualvaluespol
print('Average polarity = ' + str(avgpolarity) + "\n")
# Average subjectivity calculations(Overall)
actualvaluessub = (len(I_sentsubjectivity) - (I_sentsubjectivity.count(0)))
sumsubjectivity = sum(I_sentsubjectivity)
avgsubjectivty = sumsubjectivity / actualvaluessub
print('Average Subjectivity = ' + str(avgsubjectivty))
# --------------------------------------------------------------#
# all data
data = {'Dates': I_date, 'Frequency': I_frequency, 'Sentiment_Polarity': I_sentpolarity,
'SentSubjectivity': I_sentsubjectivity, 'Score': I_score, 'Type': I_type}
table = pandas.DataFrame(data)
with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
display(table)
print(table)
# --------------------------------------------------------------#
# grouped data for hourly plots
I_hourlydate = []
for date in I_date:
# I_hourlydate.append(str(date.year)+"."+ str(date.month)+"."+ str(date.day)+"-"+str(date.hour))
newdate = f"{date.year}{date.month:02d}{date.day:02d}{date.hour:02d}"  # zero-pad month/day/hour so the integer keys group unambiguously
I_hourlydate.append(int(newdate))
groupeddata = {'Dates': I_hourlydate, 'Frequency': I_frequency, 'Sentiment_Polarity': I_sentpolarity,
'SentSubjectivity': I_sentsubjectivity, 'Score': I_score}
tablegrouped = pandas.DataFrame(groupeddata)
grouptedtable = tablegrouped.groupby('Dates').sum()
with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
display(grouptedtable)  # mirror the earlier full-table display, here for the grouped data
# -*- coding: utf-8 -*-
"""DiamondRegression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1NPXMsi1hxVlY2f0dRGNVwuoMmMSaeFWP
"""
import pandas as pd
df = pd.read_csv("diamonds.csv")
df.head()
cuts = {'Ideal': 0,'Premium': 1, 'Very Good': 2, "Good": 3, "Fair":4}
colors = dict(zip('DEFGHIJ',range(7)))
clarity = {'SI2':5, 'SI1':4, 'VS1':2, 'VS2':3, 'VVS2':1, 'VVS1':0, 'I1':6, 'IF':7}
df['cut_n'] = df['cut'].apply(lambda x: cuts[x])
df['color_n'] = df['color'].apply(lambda x: colors[x])
df['clarity_n'] = df['clarity'].apply(lambda x: clarity[x])
X = df[["depth","table", 'carat','cut_n','color_n','clarity_n']]
Y = df['price']
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
coef = reg.coef_
intercept = reg.intercept_
print(coef)
print(intercept)
pred = pd.DataFrame(reg.predict(X_test))
Y_test_df = pd.DataFrame(Y_test)
Y_test_df.reset_index(drop = True, inplace= True)
comparison = pd.concat([Y_test_df,pred], axis = 1)
comparison
pred2 = reg.predict(X_test)
err = pd.Series(Y_test) - [p[0] for p in pred2]
err.hist(bins=100)
# errors are approximately normally distributed and symmetrical
err.describe()
import statistics as stats
def rmse(errors):
return(pow(stats.mean([pow(e,2) for e in errors]),0.5))
rmse(err)
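# Optional sanity check (assumes scikit-learn >= 0.22, which added squared=False):
# this should match rmse(err) above.
from sklearn.metrics import mean_squared_error
mean_squared_error(Y_test, [p[0] for p in pred2], squared=False)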
#Lets Repeat but with only the four C's
X = df[['carat','cut_n','color_n','clarity_n']]
Y = df['price']
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
coef = reg.coef_
intercept = reg.intercept_
print(coef)
print(intercept)
pred = pd.DataFrame(reg.predict(X_test))
Y_test_df = pd.DataFrame(Y_test)
import numpy as np
import pandas as pd
import seaborn as sns
import re
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack, csr_matrix
import lightgbm as lgbm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from wordbatch.models import FM_FTRL
import gc
train_path = "../datas/train.csv"
test_path = "../datas/test.csv"
print("Read CSV.")
df_train = pd.read_csv(train_path)
df_test = pd.read_csv(test_path)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Testing hwm_allocation() with bookings in natural order.
import unittest
from imscommon.es.ims_esclient import ESClient
from pyspark.sql import HiveContext
from pyspark import SparkContext, SparkConf
import optimizer.util
import pandas
from pandas.testing import assert_frame_equal
import optimizer.algo.hwm
import os
import json
import warnings
class Unittest_HWM_Allocations_2(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
with open(fpath + '/data_source/bookings_fully_overlapped.json') as bookings_source:
self.bookings = json.load(bookings_source)
with open(fpath + '/data_source/cfg.json') as cfg_source:
self.cfg = json.load(cfg_source)
today = '20180402'
self.days = optimizer.util.get_days_from_bookings(today, self.bookings)
self.sc = SparkContext.getOrCreate()
self.hive_context = HiveContext(self.sc)
self.schema = optimizer.util.get_common_pyspark_schema()
def compare_two_dfs(self, pandas_df_expected, df_to_test_rows):
df = self.hive_context.createDataFrame(df_to_test_rows, self.schema)
df_allocated = optimizer.algo.hwm.hwm_allocation(df, self.bookings, self.days)
pandas_df_allocated = df_allocated.select("*").toPandas()
print(pandas_df_expected)
print(pandas_df_allocated)
return self.assertTrue(assert_frame_equal(pandas_df_expected, pandas_df_allocated, check_dtype=False) is None)
def test_hwm_allocation_case1(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b3', 'b2'], [], 733, {'b1': 500, 'b3': 233}]
df_to_test_rows = [(['20180402', ['b1', 'b3', 'b2'], [], {}, 733])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case2(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1'], ['b2', 'b3'], 6047, {'b1': 500}]
df_to_test_rows = [(['20180402', ['b1'], ['b2', 'b3'], {}, 6047])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case3(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b2'], ['b1', 'b3'], 1410, {'b2': 800}]
df_to_test_rows = [(['20180402', ['b2'], ['b1', 'b3'], {}, 1410])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case4(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b3'], ['b1', 'b2'], 12241, {'b3': 1000}]
df_to_test_rows = [(['20180402', ['b3'], ['b1', 'b2'], {}, 12241])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case5(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b2'], ['b3'], 3575, {'b1': 500, 'b2': 800}]
df_to_test_rows = [(['20180402', ['b1', 'b2'], ['b3'], {}, 3575])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case6(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b2', 'b3'], ['b1'], 1002, {'b3': 1000, 'b2': 2}]
df_to_test_rows = [(['20180402', ['b2', 'b3'], ['b1'], {}, 1002])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case7(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
import pandas as pd
from scripts.python.routines.manifest import get_manifest
from scripts.python.preprocessing.serialization.routines.pheno_betas_checking import get_pheno_betas_with_common_subjects
import pathlib
from scripts.python.meta.tasks.GPL13534_Blood_ICD10_V.routines import KW_Control
from tqdm import tqdm
import numpy as np
from scripts.python.routines.plot.layout import add_layout, get_axis
from scripts.python.routines.plot.save import save_figure
import plotly.graph_objects as go
thld_above = 0.5
thld_below = 0.05
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
manifest = get_manifest('GPL13534')
dataset_statuses = {
'GSE152027': ['Control', 'Schizophrenia'],
'GSE84727': ['Control', 'Schizophrenia'],
'GSE80417': ['Control', 'Schizophrenia'],
'GSE116379': ['Control', 'Schizophrenia'],
'GSE41169': ['Control', 'Schizophrenia'],
'GSE116378': ['Control', 'Schizophrenia'],
}
datasets_train_val = ['GSE152027', 'GSE84727', 'GSE80417']
task_name = f"GPL13534_Blood_Schizo_Control"
path_wd = f"{path}/meta/tasks/{task_name}"
pathlib.Path(f"{path_wd}/tasks/011_harmonization_effect_on_controls/one_by_one").mkdir(parents=True, exist_ok=True)
pathlib.Path(f"{path_wd}/tasks/011_harmonization_effect_on_controls/all_in_one").mkdir(parents=True, exist_ok=True)
cpgs_origin = pd.read_excel(f"{path_wd}/origin/KW/cpgs_metrics.xlsx", index_col="CpG")
cpgs_one_by_one = pd.read_excel(f"{path_wd}/one_by_one/KW/cpgs_metrics.xlsx", index_col="CpG")
# coding: utf-8
# # Comparing latent space arithmetic between dimensionality reduction algorithms
#
# Generative models, including variational autoencoders (VAE), have demonstrated the ability to mathematically manipulate the learned latent space to unveil intuitive features. We are interested in this ability and sought to compare alternative dimensionality reduction algorithms.
#
# The algorithms include:
#
# | | Algorithm | Acronym |
# |:-- | :------- | :-----: |
# | 1 | Principal Components Analysis | PCA |
# | 2 | Independent Components Analysis | ICA |
# | 3 | Non-negative Matrix Factorization | NMF |
# | 4 | Analysis Using Denoising Autoencoders of Gene Expression | ADAGE |
# | 5 | Tybalt (Single Layer Variational Auotencoder) | VAE |
# | 6 | Two Hidden Layer Variational Autoencoder (100 dimensions) | VAE100 |
# | 7 | Two Hidden Layer Variational Autoencoder (300 dimensions) | VAE300 |
#
#
# ## Rationale
#
# We test the ability to identify biological signals through latent space subtraction by applying it to an unsolved problem: stratifying high grade serous ovarian cancer (HGSC) subtypes. Previous work has demonstrated that the mesenchymal and immunoreactive subtypes collapse into each other depending on the clustering algorithm. We therefore hypothesized that these subtypes actually exist on continuous activation spectra. If so, latent space subtraction should reveal the features that best separate samples in each subtype, and those features should capture known differences between the subtypes.
#
# ## Approach
#
# The notebook is split into two parts. First, we perform latent feature (vector) subtraction between the _Mesenchymal_ and _Immunoreactive_ mean latent space vectors. We visualize this difference across several dimensionality reduction algorithms listed above. Second, we take the feature most explanatory of the _Mesenchymal_ subtype and output the respective high weight genes (defined by > 2.5 std dev). These genes are run through a downstream pathways analysis.
# In[1]:
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('seaborn-notebook')
# In[3]:
sns.set(style='white', color_codes=True)
sns.set_context('paper',
rc={'font.size': 12,
'axes.titlesize': 15,
'axes.labelsize': 20,
'xtick.labelsize': 14,
'ytick.labelsize': 14})
# In[4]:
# Set seed for plotting
np.random.seed(123)
# In[5]:
ov_file = os.path.join('data', 'ov_subtype_info.tsv')
ov_df = pd.read_table(ov_file, index_col=0)
ov_df.head(2)
# In[6]:
def get_encoded_ovsubtype_info(encoded_df, ov_df):
"""
Process latent feature encodings and ovarian cancer subtypes dataframe
Arguments:
encoded_df - pandas dataframe of sample by latent feature encodings
ov_df - clinical data frame of ovarian cancer samples and
their corresponding TCGA subtype label
Output:
A tuple consisting of:
1) A merged DataFrame of encodings by subtype, with color assignments
2) A summary DataFrame of mean encodings across subtypes
"""
# Subset and merge the HGSC subtype info with the latent space feature activations
ov_samples = list(set(encoded_df.index) & (set(ov_df.index)))
ov_encoded = encoded_df.loc[ov_samples, ]
ov_encoded_subtype_df = pd.merge(ov_df.loc[:, ['SUBTYPE', 'SILHOUETTE WIDTH']], ov_encoded,
how='right', left_index=True, right_index=True)
ov_encoded_subtype_df = ov_encoded_subtype_df.assign(subtype_color =
ov_encoded_subtype_df['SUBTYPE'])
ov_subtype_color_dict = {'Differentiated': 'purple',
'Immunoreactive': 'green',
'Mesenchymal': 'blue',
'Proliferative': 'red'}
ov_encoded_subtype_df = ov_encoded_subtype_df.replace({'subtype_color': ov_subtype_color_dict})
# Get mean subtype vectors
ov_mean_subtype_df = ov_encoded_subtype_df.groupby('SUBTYPE').mean()
return (ov_encoded_subtype_df, ov_mean_subtype_df)
# In[7]:
def ov_subtraction(ov_mean_df, subtype_tuple, algorithm):
"""
Determine the ranked difference between ovarian cancer subtypes according to input mean
encoded feature activation
Arguments:
ov_mean_df - DataFrame indicating the mean vector representation of ovarian cancer subtypes
subtype_tuple - a tuple storing two strings indicating the subtraction to perform
select two: 'Mesenchymal', 'Proliferative', 'Immunoreactive', or 'Differentiated'
algorithm - a string indicating the algorithm used. Will form the column names in the output
Output:
A ranking of encoded feature differences
"""
subtype_a, subtype_b = subtype_tuple
mean_a_vector = ov_mean_df.loc[subtype_a, [str(x) for x in range(1, 101)]]
mean_b_vector = ov_mean_df.loc[subtype_b, [str(x) for x in range(1, 101)]]
ov_vector = mean_a_vector - mean_b_vector
ov_vector = ov_vector.sort_values(ascending=False)
ov_vector = ov_vector.reset_index()
ov_vector.columns = ['{}_features'.format(algorithm), algorithm]
return ov_vector
# ## Load Encoded Feature Data
# In[8]:
pca_file = 'https://github.com/gwaygenomics/pancan_viz/raw/7725578eaefe3eb3f6caf2e03927349405780ce5/data/pca_rnaseq.tsv.gz'
ica_file = 'https://github.com/gwaygenomics/pancan_viz/raw/7725578eaefe3eb3f6caf2e03927349405780ce5/data/ica_rnaseq.tsv.gz'
nmf_file = 'https://github.com/gwaygenomics/pancan_viz/raw/7725578eaefe3eb3f6caf2e03927349405780ce5/data/nmf_rnaseq.tsv.gz'
adage_file = 'https://github.com/greenelab/tybalt/raw/87496e23447a06904bf9c07c389584147b87bd65/data/encoded_adage_features.tsv'
vae_file = 'https://github.com/greenelab/tybalt/raw/87496e23447a06904bf9c07c389584147b87bd65/data/encoded_rnaseq_onehidden_warmup_batchnorm.tsv'
vae_twolayer_file = '../tybalt/data/encoded_rnaseq_twohidden_100model.tsv.gz'
vae_twolayer300_file = '../tybalt/data/encoded_rnaseq_twohidden_300model.tsv.gz'
pca_encoded_df = pd.read_table(pca_file, index_col=0)
ica_encoded_df = pd.read_table(ica_file, index_col=0)
nmf_encoded_df = pd.read_table(nmf_file, index_col=0)
adage_encoded_df = pd.read_table(adage_file, index_col=0)
vae_encoded_df = pd.read_table(vae_file, index_col=0)
vae_twolayer_encoded_df = pd.read_table(vae_twolayer_file, index_col=0)
vae_twolayer300_encoded_df = pd.read_table(vae_twolayer300_file, index_col=0)
# ## Process encoded feature data
# In[9]:
pca_ov_df, pca_ov_mean_df = get_encoded_ovsubtype_info(pca_encoded_df, ov_df)
ica_ov_df, ica_ov_mean_df = get_encoded_ovsubtype_info(ica_encoded_df, ov_df)
nmf_ov_df, nmf_ov_mean_df = get_encoded_ovsubtype_info(nmf_encoded_df, ov_df)
adage_ov_df, adage_ov_mean_df = get_encoded_ovsubtype_info(adage_encoded_df, ov_df)
vae_ov_df, vae_ov_mean_df = get_encoded_ovsubtype_info(vae_encoded_df, ov_df)
vae_tl_ov_df, vae_tl_ov_mean_df = get_encoded_ovsubtype_info(vae_twolayer_encoded_df, ov_df)
vae_tl300_ov_df, vae_tl300_ov_mean_df = get_encoded_ovsubtype_info(vae_twolayer300_encoded_df, ov_df)
# ## HGSC Subtype Arithmetic
#
# Because of the relationship observed in the consistent clustering solutions, perform the following subtraction:
#
# _Mesenchymal_ - _Immunoreactive_
#
# The goal is to observe the features with the largest difference and compare what the features represent depending on the dimensionality reduction algorithm
#
# ### Part I. Visualizing feature activation differences across algorithms
# In[10]:
mes_immuno = ('Mesenchymal', 'Immunoreactive')
# In[11]:
algorithms = ['pca', 'ica', 'nmf', 'adage', 'tybalt', 'vae_100', 'vae_300']
pca_ov_vector = ov_subtraction(pca_ov_mean_df, mes_immuno, 'pca')
ica_ov_vector = ov_subtraction(ica_ov_mean_df, mes_immuno, 'ica')
nmf_ov_vector = ov_subtraction(nmf_ov_mean_df, mes_immuno, 'nmf')
adage_ov_vector = ov_subtraction(adage_ov_mean_df, mes_immuno, 'adage')
vae_ov_vector = ov_subtraction(vae_ov_mean_df, mes_immuno, 'tybalt')
vae_tl_ov_vector = ov_subtraction(vae_tl_ov_mean_df, mes_immuno, 'vae_100')
vae_tl300_ov_vector = ov_subtraction(vae_tl300_ov_mean_df, mes_immuno, 'vae_300')
# In[12]:
latent_space_df = pd.concat([pca_ov_vector, ica_ov_vector,
nmf_ov_vector, adage_ov_vector,
vae_ov_vector, vae_tl_ov_vector,
vae_tl300_ov_vector], axis=1)
latent_space_df.head(2)
# In[13]:
# Process latent space dataframe to long format
long_latent_df = latent_space_df.stack().reset_index()
long_latent_df.columns = ['rank', 'algorithm', 'feature_activity']
# Distinguish node activation by feature
long_algorithms_df = long_latent_df[long_latent_df['algorithm'].isin(algorithms)]
long_algorithms_df.reset_index(drop=True, inplace=True)
long_features_df = long_latent_df[~long_latent_df['algorithm'].isin(algorithms)]
long_features_df.reset_index(drop=True, inplace=True)
# Concatenate node assignments to the dataframe
long_latent_space_df = pd.concat([long_algorithms_df, long_features_df],
ignore_index=True, axis=1)
long_latent_space_df.columns = ['rank', 'algorithm', 'activation', 'feature_rank',
'feature_name', 'feature']
long_latent_space_df.head(2)
# In[14]:
# Assign color to each algorithm
long_latent_space_df = long_latent_space_df.assign(algorithm_color =
long_latent_space_df['algorithm'])
algorithm_color_dict = {'pca': '#a6cee3',
'ica': '#1f78b4',
'nmf': '#b2df8a',
'adage': '#33a02c',
'tybalt': '#fb9a99',
'vae_100': '#e31a1c',
'vae_300': '#fdbf6f'}
long_latent_space_df = long_latent_space_df.replace({'algorithm_color': algorithm_color_dict})
# Drop redundant columns
long_latent_space_df = long_latent_space_df.drop(['feature_rank', 'feature_name'], axis=1)
long_latent_space_df.head(2)
# In[15]:
# Output ranking and activation scores per feature per algorithm
latent_output_file = os.path.join('results',
'hgsc_mesenchymal_immunoreactive_algorithm_subtract.tsv')
long_latent_space_df.to_csv(latent_output_file, index=False, sep='\t')
print(long_latent_space_df.shape)
long_latent_space_df.head()
# In[16]:
latent_space_figure = os.path.join('figures', 'algorithm_comparison_latent_space.png')
ax = sns.pointplot(x='rank',
y='activation',
hue='algorithm',
data=long_latent_space_df,
palette=algorithm_color_dict,
markers=['x', '3', '4', 'd', '*', 'd','o'],
orient='v',
scale=0.6)
ax.set_xlabel('Feature Rank')
ax.set_ylabel('Mesenchymal - Immunoreactive\nFeature Activation')
ax.set(xticklabels=[]);
plt.tight_layout()
plt.setp(ax.get_legend().get_texts(), fontsize='12')
plt.setp(ax.get_legend().get_title(), fontsize='16')
plt.savefig(latent_space_figure, dpi=600, height=6, width=5)
# ### Part II. Extract high weight genes from each most explanatory feature
# In[17]:
algorithms = ['PCA', 'ICA', 'NMF', 'adage', 'tybalt', 'vae_100', 'vae_300']
# In[18]:
def get_high_weight_genes(weight_matrix, node, algorithm, high_std=2.5, direction='positive',
output_file=''):
"""
Determine high weight genes given a gene weight matrix and feature
Arguments:
weight_matrix - pandas DataFrame storing gene weights for each feature
node - An integer representing the index of the feature of interest
algorithm - A string that will be included as a column in the output DataFrame
high_std - The cutoff to determine a high weight gene
direction - A string deciding which tail to consider high weight genes from
output_file - A string representing a file path to save output. Will not save if empty
Output:
A tuple consisting of two DataFrames: (high weight genes, all node genes)
"""
genes = weight_matrix.loc[int(node), :].sort_values(ascending=False)
if direction == 'positive':
hw_pos_cutoff = genes.mean() + (genes.std() * high_std)
node_df = (genes[genes > hw_pos_cutoff])
elif direction == 'negative':
hw_neg_cutoff = genes.mean() - (genes.std() * high_std)
node_df = (genes[genes < hw_neg_cutoff])
node_df = pd.DataFrame(node_df).reset_index()
node_df.columns = ['genes', 'weight']
if output_file:
node_df.to_csv(output_file, index=False, sep='\t')
# Process return data
genes_df = pd.DataFrame(genes).reset_index()
genes_df.columns = ['gene', 'activation']
genes_df = genes_df.assign(algorithm=algorithm)
return (node_df, genes_df)
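# Example usage sketch (commented out because the weight matrices are only loaded in the next
# cell; the node index and output path below are purely illustrative):
# hw_genes_df, all_genes_df = get_high_weight_genes(feature_matrix['tybalt'], node=87,
#                                                   algorithm='tybalt', direction='positive',
#                                                   output_file=os.path.join('results', 'example_hw_genes.tsv'))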
# In[19]:
# Load feature matrices
feature_matrix = dict()
pca_feature_file = '../pancan_viz/data/pca_feature_rnaseq.tsv.gz'
ica_feature_file = '../pancan_viz/data/ica_feature_rnaseq.tsv.gz'
nmf_feature_file = '../pancan_viz/data/nmf_feature_rnaseq.tsv.gz'
adage_feature_file = 'https://github.com/greenelab/tybalt/raw/4bb7c5c5eb6b9dfe843269f8c3059e1168542b55/results/adage_gene_weights.tsv'
tybalt_feature_file = 'https://github.com/greenelab/tybalt/raw/928804ffd3bb3f9d5559796b2221500c303ed92c/results/tybalt_gene_weights.tsv'
vae_feature_twolayer_file = 'https://github.com/greenelab/tybalt/raw/7d2854172b57efc4b92ca80d3ec86dfbbc3e4325/data/tybalt_gene_weights_twohidden100.tsv'
vae_feature_twolayer300_file = 'https://github.com/greenelab/tybalt/raw/7d2854172b57efc4b92ca80d3ec86dfbbc3e4325/data/tybalt_gene_weights_twohidden300.tsv'
feature_matrix['PCA'] = pd.read_table(pca_feature_file, index_col=0)
feature_matrix['ICA'] = pd.read_table(ica_feature_file, index_col=0)