| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
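Each row pairs a Python source file (the prompt) with the pandas call that completes it and the fully qualified API name. In this flattened preview the completion and api columns appear inline at the point where the prompt breaks off, delimited by pipes, for example (taken from one of the rows below):

df = | pd.read_csv(filename) | pandas.read_csv |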
import os, datetime, pymongo, configparser
import pandas as pd
from bson import json_util
global_config = None
global_client = None
global_stocklist = None
def getConfig(root_path):
global global_config
if global_config is None:
#print("initial Config...")
global_config = configparser.ConfigParser()
global_config.read(root_path + "/" + "config.ini")
return global_config
def getClient():
global global_client
from pymongo import MongoClient
if global_client is None:
#print("initial DB Client...")
global_client = MongoClient('localhost', 27017)
return global_client
def getCollection(database, collection):
client = getClient()
db = client[database]
return db[collection]
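# Usage sketch (illustrative; the database and collection names are made up, not from this file):
#   config = getConfig(root_path)                      # lazily parses <root_path>/config.ini once
#   coll = getCollection('DB_STOCK', 'US_DAILY_LIST')  # assumes MongoDB listening on localhost:27017
#   df = readFromCollection(coll)                      # defined below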
def getStockList(root_path, database, sheet):
global global_stocklist
if global_stocklist is None:
#print("initial Stock List...")
global_stocklist = queryStockList(root_path, database, sheet)
return global_stocklist
def setStockList(df):
global global_stocklist
df.set_index('symbol', inplace=True)
global_stocklist = df
return global_stocklist
def readFromCollection(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find(queryString)
df = pd.DataFrame(list(result))
if df.empty == False: del df['_id']
return df
def writeToCollection(collection, df, id = None):
jsonStrings = df.to_json(orient='records')
bsonStrings = json_util.loads(jsonStrings)
for string in bsonStrings:
if id is not None:
id_string = ''.join([string[item] for item in id])
string['_id'] = id_string
collection.save(string)
def readFromCollectionExtend(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find_one(queryString)
if result is None:
return pd.DataFrame(), {}
return pd.read_json(result['data'], orient='records'), result['metadata']
def writeToCollectionExtend(collection, symbol, df, metadata=None):
jsonStrings = {"_id":symbol, "symbol":symbol, "data":df.to_json(orient='records'), "metadata":metadata}
#bsonStrings = json_util.loads(jsonStrings)
collection.save(jsonStrings)
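# Round-trip sketch (illustrative; the symbol and metadata are made up): the "Extend" helpers
# store a whole DataFrame as a single document keyed by symbol, with the frame serialized to a
# JSON string under 'data':
#   writeToCollectionExtend(coll, 'AAPL', df, metadata={'lastUpdate': '2017-01-01'})
#   df2, meta = readFromCollectionExtend(coll, {"symbol": 'AAPL'})
# Note: Collection.save() was deprecated in PyMongo 3.0 and removed in 4.0;
# collection.replace_one({'_id': doc['_id']}, doc, upsert=True) is the modern equivalent.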
def writeToCSV(csv_dir, CollectionKey, df):
if os.path.exists(csv_dir) == False:
os.makedirs(csv_dir)
filename = csv_dir + CollectionKey + '.csv'
df.to_csv(filename)
def queryStockList(root_path, database, sheet):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False: df = setStockList(df)
return df
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=0)
if df.empty == False: df = setStockList(df)
return df
return pd.DataFrame()
except Exception as e:
print("queryStockList Exception", e)
return pd.DataFrame()
return pd.DataFrame()
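# Sketch of the config.ini layout these helpers assume (reconstructed from the keys used above;
# the path values are placeholders). StoreType selects the backend: 1 = MongoDB, 2 = CSV files.
#   [Setting]
#   StoreType = 1
#   [Paths]
#   <database> = stock_data/
#   <sheet> = us_daily/
#   CSV_SHARE = share/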
def storeStockList(root_path, database, sheet, df, symbol = None):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
if symbol is not None:
df = df[df.index == symbol].reset_index()
writeToCollection(collection, df, ['symbol'])
# try:
# index_info = collection.index_information()
# print("index info", index_info)
# except Exception as e:
# print(e)
# writeToCollection(collection, df)
# #collection.create_index('symbol', unique=True, drop_dups=True)
# else:
# writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storeStockList Exception", e)
def queryStockPublishDay(root_path, database, sheet, symbol):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename) == False: return ''
df = pd.read_csv(filename, index_col=["index"])
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
except Exception as e:
print("queryStockPublishDay Exception", e)
return ''
return ''
def storePublishDay(root_path, database, sheet, symbol, date):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=["index"])
publishDate = df[df['symbol'] == symbol]
if publishDate.empty:
df.loc[len(df)] = [symbol, date]
else:
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storePublishDay Exception", e)
def queryStock(root_path, database, sheet_1, sheet_2, symbol, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
stockList = getStockList(root_path, database, sheet_1)
lastUpdateTime = pd.Timestamp(stockList.loc[symbol][update_key])
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
df.set_index('date', inplace=True)
if 'index' in df:
del df['index']
return df, lastUpdateTime
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = csv_dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename, index_col=["date"])
return df, lastUpdateTime
except Exception as e:
print("queryStock Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeStock(root_path, database, sheet_1, sheet_2, symbol, df, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, sheet_1)
if (stockList.loc[symbol, update_key] != now_date):
stockList.at[symbol, update_key] = now_date
storeStockList(root_path, database, sheet_1, stockList, symbol)
# df.set_index('date')
# df.index = df.index.astype(str)
# df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = df.reset_index()
if 'date' in df: df.date = df.date.astype(str)
writeToCollectionExtend(collection, symbol, df, {})
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database)+ config.get('Paths', sheet)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeStock Exception", e)
def queryNews(root_path, database, sheet, symbol):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp(getStockList(root_path, database, 'SHEET_US_DAILY').loc[symbol]['news_update'])
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df = readFromCollection(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
#df.set_index('date', inplace=True)
return df, lastUpdateTime
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = | pd.read_csv(filename) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 23 10:10:30 2020.
@author: SW274998
"""
import requests
from nseta.common.constants import NSE_INDICES, INDEX_DERIVATIVES
from nseta.resources.resources import *
from nseta.common.log import default_logger
from nseta.common.tradingtime import IST_datetime
import datetime
from functools import partial
try:
import pandas as pd
except ImportError:
pass
import enum
import zipfile
import threading
import six
import numpy as np
from urllib.parse import urlparse
__all__ = ['human_readable_df','ParseNews','Recommendation','months','Direction','concatenated_dataframe','is_index','is_index_derivative', 'StrDate', 'ParseTables', 'unzip_str', 'ThreadReturns', 'URLFetch']
class Direction(enum.Enum):
Down = 1
Neutral = 2
Up = 3
V = 4
InvertedV = 5
LowerLow = 6
HigherHigh = 7
OverBought = 8
OverSold = 9
PossibleReversalUpward = 10
PossibleReversalDownward = 11
class Recommendation(enum.Enum):
Unknown = 1
Buy = 2
Sell = 3
Hold = 4
def is_index(index):
return index in NSE_INDICES
def is_index_derivative(index):
return index in INDEX_DERIVATIVES
months = ["Unknown",
"January",
"Febuary",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"]
class StrDate(datetime.date):
"""
for pattern-
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
"""
def __new__(cls, date, format):
if(isinstance(date,datetime.date)):
return datetime.date.__new__(datetime.date, date.year,
date.month, date.day)
dt = datetime.datetime.strptime(date, format)
if(isinstance(dt,datetime.datetime)):
return dt
return datetime.date.__new__(datetime.date, dt.year,
dt.month, dt.day)
@classmethod
def default_format(cls, format):
"""
returns a new class with a default parameter format in the __new__
method. so that string conversions would be simple in TableParsing with
single parameter
"""
class Date_Formatted(cls):
pass
Date_Formatted.__new__ = partial(cls.__new__, format = format)
return Date_Formatted
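# Usage sketch (illustrative format string and date):
#   dd_mm_yyyy = StrDate.default_format('%d-%m-%Y')
#   d = dd_mm_yyyy('03-01-2020')   # parsed as 3 January 2020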
class ParseTables:
def __init__(self, *args, **kwargs):
self.schema = kwargs.get('schema')
self.bs = kwargs.get('soup')
self.headers = kwargs.get('headers')
self.index = kwargs.get('index')
self._parse()
def _parse(self):
trs = self.bs.find_all('tr')
lists = []
schema = self.schema
for tr in trs:
tds = tr.find_all('td')
if len(tds) == len(schema):
lst = []
for i in range(0, len(tds)):
txt = tds[i].text.replace('\n','').replace(' ','').replace(',','')
try:
val = schema[i](txt)
except Exception:
if schema[i]==float or schema[i]==int:
val = np.nan
else:
val = ''
#raise ValueError("Error in %d. %s(%s)"%(i, str(schema[i]), txt))
except SystemExit:
pass
lst.append(val)
lists.append(lst)
self.lists = lists
def get_tables(self):
return self.lists
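# Usage sketch (illustrative; the soup, schema and headers are assumptions, not from this file):
#   schema = [StrDate.default_format('%d-%b-%Y'), str, float, float]
#   tables = ParseTables(soup=BeautifulSoup(html, 'lxml'), schema=schema,
#                        headers=['date', 'symbol', 'open', 'close'], index='date')
#   rows = tables.get_tables()   # one list per <tr> whose cell count matches the schema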
def get_df(self):
| pd.set_option('mode.chained_assignment', None) | pandas.set_option |
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index([f"%{c}" for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
| tm.assert_series_equal(s, expected) | pandas._testing.assert_series_equal |
from __future__ import absolute_import, division, print_function
import datetime
import pandas as pd
from config import *
def _drop_in_time_slice(m2m, m2b, m5cb, time_slice, to_drop):
"""Drops certain members from data structures, only in a given time slice.
This can be useful for removing people who weren't there on a specific day, or non-participants.
"""
logger.debug("Removing data: {} {}".format(time_slice, to_drop))
m2m.drop(m2m.loc[(time_slice, slice(None), to_drop), :].index, inplace=True)
m2m.drop(m2m.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m2b.drop(m2b.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m5cb.drop(m5cb.loc[(time_slice, to_drop), :].index, inplace=True)
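# Illustrative call (assumes m2m is indexed by (datetime, member1, member2), m2b by
# (datetime, member, beacon) and m5cb by (datetime, member), as the .loc slices above imply):
#   _drop_in_time_slice(m2m, m2b, m5cb,
#                       time_slice=slice('2016-01-08', '2016-01-09'),
#                       to_drop=['non_participant_1', 'non_participant_2'])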
def _clean_m2m(where, participation_dates, battery_sundays):
logger.info('loading m2m')
m2m = pd.read_hdf(dirty_store_path, 'proximity/member_to_member', where=where)
logger.info("original m2m len: {}".format(len(m2m)))
if len(m2m) == 0:
return
logger.info('cleaning m2m')
m2m.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m2m['keep'] = False
# For m2m, we need to look on both sides. Therefore, for each participating member, we will
# turn on a "keep" flag if the member is valid on either sides of the connection. Then, we will only keep
# records in which both sides are valid
logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m2m.member1 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side1_cond, 'keep_1'] = True
side2_cond = ((m2m.member2 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side2_cond, 'keep_2'] = True
m2m.loc[(m2m.keep_1 == True) & (m2m.keep_2 == True), 'keep'] = True
del m2m['keep_1']
del m2m['keep_2']
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m2m.datetime >= s.battery_period_start) & (m2m.datetime <= s.battery_period_end))
m2m.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
m2m = m2m[m2m.keep == True]
logger.info("after cleaning: {}".format(len(m2m)))
del m2m['keep']
m2m.set_index(['datetime','member1','member2'], inplace=True)
logger.info("appending cleaned m2m to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_to_member', m2m)
del m2m
def _clean_m2b(where, participation_dates, battery_sundays):
logger.info('loading m2b')
m2b = | pd.read_hdf(dirty_store_path, 'proximity/member_to_beacon', where=where) | pandas.read_hdf |
import numpy as np
import pandas as pd
import os
import sys
import pandas as pd
sys.path.append('./')
from featureband.feature_band import FeatureBand
from featureband.util.data_util import load_dataset
from featureband.util.metrics_util import evaluate_cross_validation, load_clf
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
DATASET = "rna" # ["madelon", "basehock", "usps", "coil20"]
FINAL_CLASSIFIER = "logistic" # ["knn", "logistic", "linear_svm"]
k = 300
n_splits = 5
r0 = 50
max_iter = 50
population_size = 10
n0 = 500
#x, y = load_dataset(DATASET)
x = np.array( | pd.read_csv('./medicaldata/tpotfssRNASeq/Xtrain.csv') | pandas.read_csv |
"""
.. module:: reporters
:platform: Unix, Windows
:synopsis: a module for defining OpenMM reporter classes.
.. moduleauthor:: <NAME> <<EMAIL>>
.. _pandas.DataFrame: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
.. _StateDataReporter: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.app.statedatareporter.StateDataReporter.html
.. _CustomIntegrator: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomIntegrator.html
.. _CustomCVForce: docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomCVForce.html
"""
import sys
import numpy as np
import pandas as pd
from simtk import openmm
from simtk import unit
from simtk.openmm import app
from .computers import PressureComputer
from .computers import _MoleculeTotalizer
from .utils import InputError
class _MultiStream:
def __init__(self, outputs):
self._outputs = list()
for output in outputs:
self._outputs.append(open(output, 'w') if isinstance(output, str) else output)
def __del__(self):
for output in self._outputs:
if output != sys.stdout and output != sys.stderr:
output.close()
def write(self, message):
for output in self._outputs:
output.write(message)
def flush(self):
for output in self._outputs:
output.flush()
class _AtomsMM_Reporter():
"""
Base class for reporters.
"""
def __init__(self, file, reportInterval, **kwargs):
self._reportInterval = reportInterval
self._requiresInitialization = True
self._needsPositions = False
self._needsVelocities = False
self._needsForces = False
self._needEnergy = False
extraFile = kwargs.pop('extraFile', None)
if extraFile is None:
self._out = open(file, 'w') if isinstance(file, str) else file
else:
self._out = _MultiStream([file, extraFile])
self._separator = kwargs.pop('separator', ',')
def _initialize(self, simulation, state):
pass
def _generateReport(self, simulation, state):
pass
def describeNextReport(self, simulation):
"""
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep % self._reportInterval
return (steps, self._needsPositions, self._needsVelocities, self._needsForces, self._needEnergy)
def report(self, simulation, state):
"""
Generate a report.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
state : State
The current state of the simulation
"""
if self._requiresInitialization:
self._initialize(simulation, state)
self._requiresInitialization = False
self._generateReport(simulation, state)
class ExtendedStateDataReporter(app.StateDataReporter):
"""
An extension of OpenMM's StateDataReporter_ class, which outputs information about a simulation,
such as energy and temperature, to a file.
All original functionalities of StateDataReporter_ are preserved and the following ones are
included:
1. Report the Coulomb contribution of the potential energy (keyword: `coulombEnergy`):
This contribution includes both real- and reciprocal-space terms.
2. Report the atomic virial of a fully-flexible system (keyword: `atomicVirial`):
Considering full scaling of atomic coordinates in a box volume change (i.e. without any
distance constraints), the internal virial of the system is given by
.. math::
W = -\\sum_{i,j} r_{ij} E^\\prime(r_{ij}),
where :math:`E^\\prime(r)` is the derivative of the pairwise interaction potential as a
function of the distance between to atoms. Such interaction includes van der Waals, Coulomb,
and bond-stretching contributions. Bond-bending and dihedral angles are not considered
because they are invariant to full volume-scaling of atomic coordinates.
3. Report the nonbonded contribution of the atomic virial (keyword: `nonbondedVirial`):
The nonbonded virial is given by
.. math::
W_\\mathrm{nb} = -\\sum_{i,j} r_{ij} E_\\mathrm{nb}^\\prime(r_{ij}),
where :math:`E_\\mathrm{nb}^\\prime(r)` is the derivative of the nonbonded pairwise
potential, which comprises van der Waals and Coulomb interactions only.
4. Report the atomic pressure of a fully-flexible system (keyword: `atomicPressure`):
.. math::
P = \\frac{2 K + W}{3 V},
where :math:`K` is the kinetic energy sum for all atoms in the system. If keyword
`bathTemperature` is employed (see below), the instantaneous kinetic energy is substituted
by its equipartition-theorem average
:math:`\\left\\langle K \\right\\rangle = 3 N_\\mathrm{atoms} k_B T/2`,
where :math:`T` is the heat-bath temperature.
5. Report the molecular virial of a system (keyword: `molecularVirial`):
To compute the molecular virial, only the center-of-mass coordinates of the molecules are
considered to scale in a box volume change, while the internal molecular structure is kept
unaltered. The molecular virial is computed from the nonbonded part of the atomic virial by
using the formulation of Ref. :cite:`Hunenberger_2002`:
.. math::
W_\\mathrm{mol} = W - \\sum_{i} (\\mathbf{r}_i - \\mathbf{r}_i^\\mathrm{cm}) \\cdot \\mathbf{F}_i,
where :math:`\\mathbf{r}_i` is the coordinate of atom i, :math:`\\mathbf{F}_i` is the
resultant pairwise force acting on it (excluding bond-bending and dihedral angles), and
:math:`\\mathbf{r}_i^\\mathrm{cm}` is the center-of-mass coordinate of its containing
molecule.
6. Report the molecular pressure of a system (keyword: `molecularPressure`):
.. math::
P = \\frac{2 K_\\mathrm{mol} + W_\\mathrm{mol}}{3 V},
where :math:`K_\\mathrm{mol}` is the center-of-mass kinetic energy summed for all molecules
in the system. If keyword `bathTemperature` is employed (see below), the instantaneous
kinetic energy is substituted by its equipartition-theorem average
:math:`\\left\\langle K_\\mathrm{mol} \\right\\rangle = 3 N_\\mathrm{mols} k_B T/2`,
where :math:`T` is the heat-bath temperature.
7. Report the center-of-mass kinetic energy (keyword: `molecularKineticEnergy`):
.. math::
K_\\mathrm{mol} = \\frac{1}{2} \\sum_{i=1}^{N_\\mathrm{mol}} M_i v_{\\mathrm{cm}, i}^2,
where :math:`N_\\mathrm{mol}` is the number of molecules in the system, :math:`M_i` is the
total mass of molecule `i`, and :math:`v_{\\mathrm{cm}, i}` is the center-of-mass velocity
of molecule `i`.
8. Report potential energies at multiple global parameter states (keyword: `globalParameterStates`):
Computes and reports the potential energy of the system at a number of provided global
parameter states.
9. Report global parameter values (keyword: `globalParameters`):
Reports the values of specified global parameters.
10. Report derivatives of energy with respect to global parameters (keyword: `energyDerivatives`):
Computes and reports derivatives of the potential energy of the system at the current
state with respect to specified global parameters.
11. Report values of collective variables (keyword: `collectiveVariables`)
Report the values of a set of collective variables.
12. Allow specification of an extra file for reporting (keyword: `extraFile`).
This can be used for replicating a report simultaneously to `sys.stdout` and to a file
using a unique reporter.
Keyword Args
------------
coulombEnergy : bool, optional, default=False
Whether to write the Coulomb contribution of the potential energy to the file.
atomicVirial : bool, optional, default=False
Whether to write the total atomic virial to the file.
nonbondedVirial : bool, optional, default=False
Whether to write the nonbonded contribution to the atomic virial to the file.
atomicPressure : bool, optional, default=False
Whether to write the internal atomic pressure to the file.
molecularVirial : bool, optional, default=False
Whether to write the molecular virial to the file.
molecularPressure : bool, optional, default=False
Whether to write the internal molecular pressure to the file.
molecularKineticEnergy : bool, optional, default=False
Whether to write the molecular center-of-mass kinetic energy to the file.
globalParameterStates : pandas.DataFrame_, optional, default=None
A DataFrame containing context global parameters (column names) and sets of values
thereof. If it is provided, then the potential energy will be reported for every state
these parameters define.
globalParameters : list(str), optional, default=None
A list of global parameter names. If it is provided, then the values of these parameters
will be reported.
energyDerivatives : list(str), optional, default=None
A list of global parameter names. If it is provided, then the derivatives of the
total potential energy with respect to these parameters will be reported. It is
necessary that the calculation of these derivatives has been activated beforehand
(see, for instance, CustomIntegrator_).
collectiveVariables : list(openmm.CustomCVForce), optional, default=None
A list of CustomCVForce_ objects. If it is provided, then the values of all collective
variables associated with these objects will be reported.
pressureComputer : :class:`~atomsmm.computers.PressureComputer`, optional, default=None
A computer designed to determine pressures and virials. This is mandatory if any keyword
related to virial or pressure is set as `True`.
extraFile : str or file, optional, default=None
Extra file to write to, specified as a file name or a file object.
"""
def __init__(self, file, reportInterval, **kwargs):
self._coulombEnergy = kwargs.pop('coulombEnergy', False)
self._atomicVirial = kwargs.pop('atomicVirial', False)
self._nonbondedVirial = kwargs.pop('nonbondedVirial', False)
self._atomicPressure = kwargs.pop('atomicPressure', False)
self._molecularVirial = kwargs.pop('molecularVirial', False)
self._molecularPressure = kwargs.pop('molecularPressure', False)
self._molecularKineticEnergy = kwargs.pop('molecularKineticEnergy', False)
self._globalParameterStates = kwargs.pop('globalParameterStates', None)
self._globalParameters = kwargs.pop('globalParameters', None)
self._energyDerivatives = kwargs.pop('energyDerivatives', None)
self._collectiveVariables = kwargs.pop('collectiveVariables', None)
self._pressureComputer = kwargs.pop('pressureComputer', None)
extra = kwargs.pop('extraFile', None)
if extra is None:
super().__init__(file, reportInterval, **kwargs)
else:
super().__init__(_MultiStream([file, extra]), reportInterval, **kwargs)
self._computing = any([self._coulombEnergy,
self._atomicVirial,
self._nonbondedVirial,
self._atomicPressure,
self._molecularVirial,
self._molecularPressure,
self._molecularKineticEnergy])
if self._computing:
if self._pressureComputer is not None and not isinstance(self._pressureComputer, PressureComputer):
raise InputError('keyword "pressureComputer" requires a PressureComputer instance')
self._needsPositions = True
self._needsForces = any([self._needsForces,
self._molecularVirial,
self._molecularPressure])
self._needsVelocities = any([self._needsVelocities,
self._molecularPressure,
self._atomicPressure,
self._molecularKineticEnergy])
self._backSteps = -sum([self._speed, self._elapsedTime, self._remainingTime])
def _add_item(self, lst, item):
if self._backSteps == 0:
lst.append(item)
else:
lst.insert(self._backSteps, item)
def _constructHeaders(self):
headers = super()._constructHeaders()
if self._coulombEnergy:
self._add_item(headers, 'Coulomb Energy (kJ/mole)')
if self._atomicVirial:
self._add_item(headers, 'Atomic Virial (kJ/mole)')
if self._nonbondedVirial:
self._add_item(headers, 'Nonbonded Virial (kJ/mole)')
if self._atomicPressure:
self._add_item(headers, 'Atomic Pressure (atm)')
if self._molecularVirial:
self._add_item(headers, 'Molecular Virial (kJ/mole)')
if self._molecularPressure:
self._add_item(headers, 'Molecular Pressure (atm)')
if self._molecularKineticEnergy:
self._add_item(headers, 'Molecular Kinetic Energy (kJ/mole)')
if self._globalParameterStates is not None:
for index in self._globalParameterStates.index:
self._add_item(headers, 'Energy[{}] (kJ/mole)'.format(index))
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(headers, name)
if self._energyDerivatives is not None:
for name in self._energyDerivatives:
self._add_item(headers, 'diff(E,{})'.format(name))
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for index in range(force.getNumCollectiveVariables()):
name = force.getCollectiveVariableName(index)
self._add_item(headers, name)
return headers
def _constructReportValues(self, simulation, state):
values = super()._constructReportValues(simulation, state)
if self._computing:
computer = self._pressureComputer
computer.import_configuration(state)
atomicVirial = computer.get_atomic_virial().value_in_unit(unit.kilojoules_per_mole)
if self._coulombEnergy:
coulombVirial = computer.get_coulomb_virial()
self._add_item(values, coulombVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicVirial:
self._add_item(values, atomicVirial)
if self._nonbondedVirial:
nonbondedVirial = computer.get_dispersion_virial() + computer.get_coulomb_virial()
self._add_item(values, nonbondedVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicPressure:
atomicPressure = computer.get_atomic_pressure()
self._add_item(values, atomicPressure.value_in_unit(unit.atmospheres))
if self._molecularVirial or self._molecularPressure:
forces = state.getForces(asNumpy=True)
if self._molecularVirial:
molecularVirial = computer.get_molecular_virial(forces)
self._add_item(values, molecularVirial.value_in_unit(unit.kilojoules_per_mole))
if self._molecularPressure:
molecularPressure = computer.get_molecular_pressure(forces)
self._add_item(values, molecularPressure.value_in_unit(unit.atmospheres))
if self._molecularKineticEnergy:
molKinEng = computer.get_molecular_kinetic_energy()
self._add_item(values, molKinEng.value_in_unit(unit.kilojoules_per_mole))
if self._globalParameterStates is not None:
original = dict()
for name in self._globalParameterStates.columns:
original[name] = simulation.context.getParameter(name)
latest = original.copy()
for index, row in self._globalParameterStates.iterrows():
for name, value in row.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
latest[name] = value
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
self._add_item(values, energy.value_in_unit(unit.kilojoules_per_mole))
for name, value in original.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(values, simulation.context.getParameter(name))
if self._energyDerivatives is not None:
mystate = simulation.context.getState(getParameterDerivatives=True)
derivative = mystate.getEnergyParameterDerivatives()
for name in self._energyDerivatives:
self._add_item(values, derivative[name])
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for cv in force.getCollectiveVariableValues(simulation.context):
self._add_item(values, cv)
return values
class XYZReporter(_AtomsMM_Reporter):
"""
Outputs to an XYZ-format file a series of frames containing the coordinates, velocities,
momenta, or forces on all atoms in a Simulation.
.. note::
Coordinates are expressed in angstroms, velocities in angstrom/picosecond, momenta in
dalton*angstrom/picosecond, and forces in dalton*angstrom/picosecond^2.
To use this reporter, create an XYZReporter object and append it to the Simulation's list of
reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
"""
def __init__(self, file, reportInterval, **kwargs):
self._output = kwargs.get('output', 'positions')
self._groups = kwargs.get('groups', None)
if self._output == 'positions':
self._unit = unit.angstroms
elif self._output == 'velocities':
self._unit = unit.angstroms/unit.picoseconds
elif self._output == 'momenta':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds
elif self._output == 'forces':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds**2
else:
raise InputError('Unrecognizable keyword value')
super().__init__(file, reportInterval, **kwargs)
self._needsPositions = self._output == 'positions'
self._needsVelocities = self._output in ['velocities', 'momenta']
self._needsForces = self._output == 'forces'
def _initialize(self, simulation, state):
self._symbols = [atom.element.symbol for atom in simulation.topology.atoms()]
sys = simulation.system
self._N = sys.getNumParticles()
if self._output == 'momenta':
mass = [sys.getParticleMass(i).value_in_unit(unit.dalton) for i in range(self._N)]
self._mass = np.vstack([mass, mass, mass]).transpose()*unit.dalton
def _get_values(self, simulation, state):
if self._output == 'positions':
values = state.getPositions(asNumpy=True)
elif self._output == 'velocities':
values = state.getVelocities(asNumpy=True)
elif self._output == 'momenta':
values = self._mass*state.getVelocities(asNumpy=True)
elif self._groups is None:
values = state.getForces(asNumpy=True)
else:
new_state = simulation.context.getState(getForces=True, groups=self._groups)
values = new_state.getForces(asNumpy=True)
return values.value_in_unit(self._unit)
def _write(self, step, N, names, values):
print(N, file=self._out)
pd.DataFrame(index=names, data=values).to_csv(
self._out,
sep='\t',
header=[f'{self._output} in {self._unit} at time step {step}', '', ''],
)
def _generateReport(self, simulation, state):
values = self._get_values(simulation, state)
self._write(simulation.currentStep, self._N, self._symbols, values)
class CenterOfMassReporter(XYZReporter):
"""
Outputs to an XYZ-format file a series of frames containing the center-of-mass coordinates,
center-of-mass velocities, total momenta, or resultant forces on all molecules in a Simulation.
.. note::
Coordinates are expressed in angstroms, velocities in angstrom/picosecond, momenta in
dalton*angstrom/picosecond, and forces in dalton*angstrom/picosecond^2.
To use this reporter, create an CenterOfMassReporter object and append it to the Simulation's
list of reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
"""
def _initialize(self, simulation, state):
super()._initialize(simulation, state)
self._mols = _MoleculeTotalizer(simulation.context, simulation.topology)
def _generateReport(self, simulation, state):
values = self._get_values(simulation, state)
if self._output in ['positions', 'velocities']:
cm_values = self._mols.massFrac.dot(values)
else:
cm_values = self._mols.selection.dot(values)
self._write(simulation.currentStep, self._mols.nmols, self._mols.residues, cm_values)
class CustomIntegratorReporter(_AtomsMM_Reporter):
"""
Outputs global and per-DoF variables of a CustomIntegrator instance.
Keyword Args
------------
describeOnly : bool, optional, default=True
Whether to output only descriptive statistics that summarize the activated per-Dof
variables.
"""
def __init__(self, file, reportInterval, **kwargs):
super().__init__(file, reportInterval, **kwargs)
self._describeOnly = kwargs.pop('describeOnly', True)
self._variables = []
for key, value in kwargs.items():
if value is True:
self._variables.append(key)
if not self._variables:
raise InputError("No global or perDof variables have been passed")
def _initialize(self, simulation, state):
integrator = self._integrator = simulation.integrator
if not isinstance(integrator, openmm.CustomIntegrator):
raise Exception("simulation.integrator is not a CustomIntegrator")
self._globals = {}
for index in range(integrator.getNumGlobalVariables()):
variable = integrator.getGlobalVariableName(index)
if variable in self._variables:
self._globals[variable] = index
self._perDof = {}
for index in range(integrator.getNumPerDofVariables()):
variable = integrator.getPerDofVariableName(index)
if variable in self._variables:
self._perDof[variable] = index
if set(self._variables) != set(self._globals) | set(self._perDof):
raise InputError("Unknown variables have been passed")
def _generateReport(self, simulation, state):
for variable, index in self._globals.items():
value = self._integrator.getGlobalVariable(index)
print('{}\n{}'.format(variable, value), file=self._out)
for variable, index in self._perDof.items():
values = self._integrator.getPerDofVariable(index)
titles = ['{}.{}'.format(variable, dir) for dir in ['x', 'y', 'z']]
df = pd.DataFrame(data=np.array(values), columns=titles)
if self._describeOnly:
print(df.describe(), file=self._out)
else:
df.to_csv(self._out, sep='\t')
class ExpandedEnsembleReporter(_AtomsMM_Reporter):
"""
Performs an Expanded Ensemble simulation and reports the energies of multiple states.
Parameters
----------
states : pandas.DataFrame_
A DataFrame containing context global parameters (column names) and sets of values
thereof. The potential energy will be reported for every state these parameters define.
If one of the variables is named as `weight`, then its set of values will be assigned
to every state as an importance sampling weight. Otherwise, all states will have
identical weights. States which are supposed to only have their energies reported, with
no actual visits, can have their weights set up to `-inf`.
temperature : unit.Quantity
The system temperature.
Keyword Args
------------
reportsPerExchange : int, optional, default=1
The number of reports between attempts to exchange the global parameter state, that is,
the exchange interval measured in units of report intervals.
"""
def __init__(self, file, reportInterval, states, temperature, **kwargs):
self._parameter_states = states.copy()
self._nstates = len(states.index)
self._reports_per_exchange = kwargs.pop('reportsPerExchange', 1)
super().__init__(file, reportInterval, **kwargs)
if 'weight' in states:
self._weights = self._parameter_states.pop('weight').values
finite = np.where(np.isfinite(self._weights))[0]
self._first_state = finite[0]
self._last_state = finite[-1]
else:
self._weights = np.zeros(self._nstates)
self._first_state = 0
self._last_state = self._nstates - 1
kT = (unit.MOLAR_GAS_CONSTANT_R*temperature).value_in_unit(unit.kilojoules_per_mole)
self._beta = 1.0/kT
self._nreports = 0
self._overall_visits = np.zeros(self._nstates, dtype=int)
self._downhill_visits = np.zeros(self._nstates, dtype=int)
self._probability_accumulators = np.zeros(self._nstates)
self._downhill = False
self._counting_started = False
self._regime_change = []
def _initialize(self, simulation, state):
headers = ['step', 'state']
for index in self._parameter_states.index:
headers.append('Energy[{}] (kJ/mole)'.format(index))
print(*headers, sep=self._separator, file=self._out)
def _register_visit(self, state):
if self._downhill:
if state == self._first_state:
self._downhill = False
self._regime_change.append(self._nreports)
elif state == self._last_state:
self._downhill = True
self._regime_change.append(self._nreports)
if self._counting_started:
self._overall_visits[state] += 1
if self._downhill:
self._downhill_visits[state] += 1
else:
self._counting_started = self._downhill is True
def _generateReport(self, simulation, state):
energies = np.zeros(self._nstates)
original = dict()
for name in self._parameter_states.columns:
original[name] = simulation.context.getParameter(name)
latest = original.copy()
for i, (index, row) in enumerate(self._parameter_states.iterrows()):
for name, value in row.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
latest[name] = value
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
energies[i] = energy.value_in_unit(unit.kilojoules_per_mole)
self._nreports += 1
exponents = self._weights - self._beta*energies
probabilities = np.exp(exponents - np.amax(exponents))
probabilities /= np.sum(probabilities)
self._probability_accumulators += probabilities
if self._nreports % self._reports_per_exchange == 0:
state = np.random.choice(self._nstates, p=probabilities)
for name, value in self._parameter_states.iloc[state].items():
if value != latest[name]:
simulation.context.setParameter(name, value)
self._register_visit(state)
print(simulation.currentStep, state, *energies, sep=self._separator, file=self._out)
def _isochronal_delta(self, f, n):
N = len(f)
b = 3/(n*(n+1)*(2*n+1))
seq = np.arange(1, n+1)
a = (b/2)*np.array([n*(n+1)-k*(k-1) for k in seq])
ind = np.argsort(f)
fa = f[ind]
delta = np.empty(N)
delta[0] = -fa[0]/2 + np.sum(a*fa[1:n+1])
for i in range(1, N-1):
delta[i] = b*np.sum([k*(fa[min(i+k, N-1)] - fa[max(i-k, 0)]) for k in seq])
delta[N-1] = fa[N-1]/2 - np.sum(np.flip(a)*fa[N-n-1:N-1])
delta[ind] = delta
return delta
def read_csv(self, file, **kwargs):
comment = kwargs.pop('comment', '#')
separator = kwargs.pop('sep', self._separator)
df = pd.read_csv(file, comment=comment, sep=separator, **kwargs)
energies = np.zeros(self._nstates)
for index, row in df.iterrows():
state = int(row['state'])
for i in self._parameter_states.index:
energies[i] = row['Energy[{}] (kJ/mole)'.format(i)]
self._nreports += 1
exponents = self._weights - self._beta*energies
probabilities = np.exp(exponents - np.amax(exponents))
probabilities /= np.sum(probabilities)
self._probability_accumulators += probabilities
if self._nreports % self._reports_per_exchange == 0:
self._register_visit(state)
def state_sampling_analysis(self, staging_variable=None, to_file=True, isochronal_n=2):
"""
Build histograms of states visited during the overall process as well as during downhill
walks.
Returns
-------
pandas.DataFrame_
"""
mask = self._overall_visits > 0
frame = pd.DataFrame(self._parameter_states)[mask]
histogram = self._overall_visits[mask]
downhill_fraction = self._downhill_visits[mask]/histogram
weight = self._weights[mask]
frame['weight'] = weight
frame['histogram'] = histogram/np.sum(histogram)
frame['downhill_fraction'] = downhill_fraction
if self._counting_started:
probability = self._probability_accumulators[mask]/self._nreports
free_energy = weight - np.log(probability)
free_energy -= free_energy[0]
delta = self._isochronal_delta(downhill_fraction, isochronal_n)
isochronal_weight = weight + 0.5*np.log(delta/probability)
frame['free_energy'] = free_energy
frame['isochronal_histogram'] = np.sqrt(delta*probability)
frame['isochronal_weight'] = isochronal_weight - isochronal_weight[0]
if staging_variable is not None:
x = frame[staging_variable].values
f = downhill_fraction
n = len(x)
optimal_pdf = np.sqrt(np.diff(f)/np.diff(x)) # Stepwise optimal PDF
area = optimal_pdf*np.diff(x) # Integral in each interval
optimal_cdf = np.cumsum(area)/np.sum(area) # Piecewise linear optimal CDF
optimal_x = np.interp(np.linspace(0, 1, n), np.insert(optimal_cdf, 0, 0), x)
frame['staging_{}'.format(staging_variable)] = optimal_x
frame['staging_weight'] = np.interp(optimal_x, x, free_energy)
if to_file:
print('# {0} State Sampling Analysis {0}'.format('-'*40), file=self._out)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print('# ' + frame.to_string(index=False).replace('\n', '\n# '), file=self._out)
return frame
def walking_time_analysis(self, history=False, to_file=True):
times = np.diff(np.array(self._regime_change))
downhill = self._reportInterval*times[0::2]
uphill = self._reportInterval*times[1::2]
if history:
df = pd.DataFrame({'downhill': pd.Series(downhill),
'uphill': | pd.Series(uphill) | pandas.Series |
"""Create a synthetic population that is representative of Germany."""
from pathlib import Path
import numpy as np
import pandas as pd
import pytask
import sid
from sid.shared import factorize_assortative_variables
from src.config import BLD
from src.config import N_HOUSEHOLDS
from src.config import SRC
from src.create_initial_states.create_contact_model_group_ids import (
add_contact_model_group_ids,
)
from src.create_initial_states.create_vaccination_priority import (
create_vaccination_group,
)
from src.create_initial_states.create_vaccination_priority import (
create_vaccination_rank,
)
from src.prepare_data.task_prepare_rki_data import TRANSLATE_STATES
from src.shared import create_age_groups
from src.shared import create_age_groups_rki
_DEPENDENCIES = {
# py files
"sid_shared.py": Path(sid.__file__).parent.resolve() / "shared.py",
"shared.py": SRC / "shared.py",
"create_contact_model_group_ids": SRC
/ "create_initial_states"
/ "create_contact_model_group_ids.py",
"add_weekly_ids": SRC / "create_initial_states" / "add_weekly_ids.py",
"make_educ_group_columns": SRC
/ "create_initial_states"
/ "make_educ_group_columns.py",
"create_vaccination_priority": SRC
/ "create_initial_states"
/ "create_vaccination_priority.py",
"translations": SRC / "prepare_data" / "task_prepare_rki_data.py",
#
# data
"hh_data": SRC
/ "original_data"
/ "population_structure"
/ "microcensus2010_cf.dta",
"county_probabilities": BLD / "data" / "population_structure" / "counties.parquet",
"work_daily_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "work_recurrent_daily.pkl",
"work_weekly_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "work_recurrent_weekly.pkl",
"other_daily_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "other_recurrent_daily.pkl",
"other_weekly_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "other_recurrent_weekly.pkl",
"params": BLD / "params.pkl",
}
@pytask.mark.depends_on(_DEPENDENCIES)
@pytask.mark.parametrize(
"n_hhs, produces",
[
(N_HOUSEHOLDS, BLD / "data" / "initial_states.parquet"),
(100_000, BLD / "data" / "debug_initial_states.parquet"),
],
)
def task_create_initial_states_microcensus(depends_on, n_hhs, produces):
mc = pd.read_stata(depends_on["hh_data"])
county_probabilities = pd.read_parquet(depends_on["county_probabilities"])
work_daily_dist = pd.read_pickle(depends_on["work_daily_dist"])
work_weekly_dist = pd.read_pickle(depends_on["work_weekly_dist"])
other_daily_dist = pd.read_pickle(depends_on["other_daily_dist"])
other_weekly_dist = pd.read_pickle(depends_on["other_weekly_dist"])
params = pd.read_pickle(depends_on["params"])
no_vaccination_share = params.loc[
("vaccinations", "share_refuser", "share_refuser"), "value"
]
df = _build_initial_states(
mc=mc,
county_probabilities=county_probabilities,
work_daily_dist=work_daily_dist,
work_weekly_dist=work_weekly_dist,
other_daily_dist=other_daily_dist,
other_weekly_dist=other_weekly_dist,
n_households=n_hhs,
seed=3933,
no_vaccination_share=no_vaccination_share,
)
df.to_parquet(produces)
def _build_initial_states(
mc,
county_probabilities,
work_daily_dist,
work_weekly_dist,
other_daily_dist,
other_weekly_dist,
n_households,
seed,
no_vaccination_share,
):
mc = _prepare_microcensus(mc)
equal_probs = pd.DataFrame()
equal_probs["hh_id"] = mc["hh_id"].unique()
equal_probs["probability"] = 1 / len(equal_probs)
df = _sample_mc_hhs(mc, equal_probs, n_households=n_households, seed=seed)
county_and_state = _draw_counties(
hh_ids=df["hh_id"].unique(),
county_probabilities=county_probabilities,
seed=2282,
)
df = df.merge(county_and_state, on="hh_id", validate="m:1")
df = df.astype({"age": np.uint8, "hh_id": "category"})
df = df.sort_values("hh_id").reset_index()
df.index.name = "temp_index"
assert not df.index.duplicated().any()
df["occupation"] = _create_occupation(df)
df = add_contact_model_group_ids(
df,
work_daily_dist=work_daily_dist,
work_weekly_dist=work_weekly_dist,
other_daily_dist=other_daily_dist,
other_weekly_dist=other_weekly_dist,
seed=555,
)
adult_at_home = (df["occupation"].isin(["stays home", "retired"])) & (
df["age"] >= 18
)
df["adult_in_hh_at_home"] = adult_at_home.groupby(df["hh_id"]).transform(np.any)
df["educ_contact_priority"] = _create_educ_contact_priority(df)
df["vaccination_group"] = create_vaccination_group(states=df, seed=484)
df["vaccination_rank"] = create_vaccination_rank(
df["vaccination_group"], share_refuser=no_vaccination_share, seed=909
)
# This is uncorrelated with the work contact priority.
# This allows us to easily match the empirical compliance rate.
df["rapid_test_compliance"] = np.random.uniform(low=0, high=1, size=len(df))
df["quarantine_compliance"] = np.random.uniform(low=0, high=1, size=len(df))
# factorize group id columns
to_factorize = [col for col in df if "_group_id" in col]
for col in to_factorize:
df[col], _ = factorize_assortative_variables(df, [col])
df.index.name = "index"
df = _only_keep_relevant_columns(df)
np.random.seed(1337)
df = df.sample(frac=1).reset_index(drop=True)
return df
def _prepare_microcensus(mc):
rename_dict = {
"ef1": "east_west",
"ef3s": "district_id",
"ef4s": "hh_nr_in_district",
"ef20": "hh_size",
"ef29": "work_type",
"ef31": "hh_form",
"ef44": "age",
"ef46": "gender",
"ef149": "frequency_work_saturday",
"ef150": "frequency_work_sunday",
}
mc = mc.rename(columns=rename_dict)
mc = mc[rename_dict.values()]
mc["private_hh"] = mc["hh_form"] == "bevölkerung in privathaushalten"
# restrict to private households for the moment
mc = mc[mc["private_hh"]]
mc["gender"] = (
mc["gender"]
.replace({"männlich": "male", "weiblich": "female"})
.astype("category")
)
mc["age"] = mc["age"].replace({"95 jahre und älter": 96})
mc["age_group"] = create_age_groups(mc["age"])
mc["age_group_rki"] = create_age_groups_rki(mc)
# 53% no, 21% every now and then, 17% regularly, 9% all the time
work_answers = ["ja, ständig", "ja, regelmäßig"]
mc["work_saturday"] = mc["frequency_work_saturday"].isin(work_answers)
# 72% no, 14% every now and then, 10% regularly, 3% all the time
mc["work_sunday"] = mc["frequency_work_sunday"].isin(work_answers)
mc["hh_id"] = mc.apply(_create_mc_hh_id, axis=1)
mc["hh_id"] = pd.factorize(mc["hh_id"])[0]
assert len(mc["hh_id"].unique()) == 11_461, "Wrong number of households."
keep_cols = [
"private_hh",
"gender",
"age",
"age_group",
"age_group_rki",
"work_type",
"work_saturday",
"work_sunday",
"hh_id",
]
mc = mc[keep_cols]
return mc
def _create_mc_hh_id(row):
hh_id_parts = ["east_west", "district_id", "hh_nr_in_district"]
row_id = "_".join(str(row[var]) for var in hh_id_parts)
return row_id
def _sample_mc_hhs(mc, hh_probabilities, n_households, seed):
np.random.seed(seed)
sampled_ids = np.random.choice(
hh_probabilities.hh_id,
p=hh_probabilities.probability,
size=n_households,
replace=True,
)
new_id_df = | pd.DataFrame({"old_hh_id": sampled_ids}) | pandas.DataFrame |
import csv
import pandas as pd
import argparse
import statsmodels.api as sm
import numpy as np
import math
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
parser = argparse.ArgumentParser(description="Parameters needed on the researcher side")
parser.add_argument('-i', '--dataset_file', type=str, help='Path to the original dataset file (E)', required=True)
parser.add_argument('-a', '--E_case_control_IDs_file', type=str, help='Path to the csv file that contains case and control IDs for dataset E', required=True)
parser.add_argument('-n', '--noisy_dataset_file', type=str, help='Path to the partial noisy dataset (D_k^e)', required=True)
parser.add_argument('-g', '--D_dataset_GWAS_file', type=str, help='Path to the GWAS of dataset D file', required=True)
parser.add_argument('-b', '--D_case_control_IDs_file', type=str, help='Path to the csv file that contains case and control IDs for dataset D', required=True)
parser.add_argument('-e', '--epsilon', type=int, help='Privacy parameter', required=True)
parser.add_argument('-x', '--odds_cut_off', type=float, help='Threshold (cut-off point) for odds ratio', required=True)
parser.add_argument('-y', '--maf_cut_off', type=float, help='Threshold (cut-off point) for MAF', required=True)
parser.add_argument('-z', '--pval_cut_off', type=float, help='Threshold (cut-off point) for p-value', required=True)
parser.add_argument('-o', '--output_dir', type=str, help='Path to save the output file.', required=True)
args = parser.parse_args()
def get_user_IDs(case_control_IDs_file):
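    """Read case IDs (first csv row) and control IDs (second csv row) from the given file."""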
with open(case_control_IDs_file) as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
for index, row in enumerate(spamreader):
if index == 0:
case_IDs = row
if index == 1:
control_IDs = row
return case_IDs, control_IDs
def compute_statistics(a, b, c, d):
table = sm.stats.Table2x2(np.array([[a, b], [c, d]]))
low_interval, high_interval = table.oddsratio_confint(alpha=0.05, method='normal')
column_names = ['odds_ratio', 'low_interval', 'high_interval', 'se', 'p_val']
return pd.Series(["%.8f" % table.oddsratio, "%.8f" % low_interval, "%.8f" % high_interval, "%.8f" % table.log_oddsratio_se, "%.8f" % table.log_oddsratio_pvalue()], index=column_names)
def estimate_value(dataframe, state):
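    """Estimate the true number of occurrences of ``state`` per SNP from the noisy reports.

    Under the randomized response used here each value is kept with probability
    p = e^eps / (e^eps + 2) and flipped to either other value with probability
    q = 1 / (e^eps + 2), so for a true count n_i out of n reports the expected observed
    count is E[c_i] = n_i * p + (n - n_i) * q; solving gives the unbiased estimate
    (c_i - n * q) / (p - q) computed below.
    """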
p = np.exp(args.epsilon) / (np.exp(args.epsilon) + 2)
q = 1 / (np.exp(args.epsilon) + 2)
n = len(dataframe.columns)
if (p != q):
ci = dataframe.apply(lambda x: (x == state).sum(), axis=1)
cpi = ((ci - n * q) / (p - q))
cpi = cpi.apply(np.int64)
return cpi
else:
return dataframe.apply(lambda x: (x == state).sum(), axis=1)
# If the aggregation technique is used, the verifier estimates the actual occurrences of 0, 1, and 2.
def perform_GWAS(dataframe, case_IDs, control_IDs, aggregation):
case_dataframe = dataframe[case_IDs]
control_dataframe = dataframe[control_IDs]
if aggregation == False:
dataframe['case_0'] = case_dataframe.apply(lambda x: (x == 0).sum(), axis=1)
dataframe['case_1'] = case_dataframe.apply(lambda x: (x == 1).sum(), axis=1)
dataframe['case_2'] = case_dataframe.apply(lambda x: (x == 2).sum(), axis=1)
dataframe['control_0'] = control_dataframe.apply(lambda x: (x == 0).sum(), axis=1)
dataframe['control_1'] = control_dataframe.apply(lambda x: (x == 1).sum(), axis=1)
dataframe['control_2'] = control_dataframe.apply(lambda x: (x == 2).sum(), axis=1)
else:
dataframe['case_0'] = estimate_value(case_dataframe, 0)
dataframe['case_1'] = estimate_value(case_dataframe, 1)
dataframe['case_2'] = estimate_value(case_dataframe, 2)
dataframe['control_0'] = estimate_value(control_dataframe, 0)
dataframe['control_1'] = estimate_value(control_dataframe, 1)
dataframe['control_2'] = estimate_value(control_dataframe, 2)
dataframe['case_major'] = dataframe['case_0']
dataframe['case_minor'] = dataframe['case_1'] + dataframe['case_2']
dataframe['case_minor_counts'] = dataframe['case_1'] + 2 * dataframe['case_2']
dataframe['control_major'] = dataframe['control_0']
dataframe['control_minor'] = dataframe['control_1'] + dataframe['control_2']
dataframe['control_minor_counts'] = dataframe['control_1'] + 2 * dataframe['control_2']
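    # Per-SNP association statistics come from the 2x2 table
    #   [[case_minor,  control_minor],
    #    [case_major,  control_major]]
    # handed to compute_statistics above.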
dataframe_GWAS = dataframe.apply(
lambda x: compute_statistics(x['case_minor'], x['control_minor'], x['case_major'], x['control_major']), axis=1)
dataframe_GWAS['alt'] = dataframe['alt']
dataframe_GWAS['ref'] = dataframe['ref']
dataframe_GWAS['case_MAF'] = dataframe['case_minor_counts'] / (2.0 * len(case_IDs))
dataframe_GWAS['control_MAF'] = dataframe['control_minor_counts'] / (2.0 * len(control_IDs))
dataframe_GWAS['MAF'] = (dataframe['case_minor_counts'] + dataframe['control_minor_counts']) / (
2.0 * (len(case_IDs) + len(control_IDs)))
return dataframe_GWAS
def randomized_response(val, p, q):
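    """Perturb a genotype value in {0, 1, 2}: keep it with probability p and map it to
    each of the other two values with probability q (the callers' p and q satisfy
    p + 2q = 1).
    """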
rand_val = np.random.uniform(0, 1)
new_val = val
if rand_val > p:
if rand_val > p + q:
if val == 1:
new_val = 2
else:
new_val = 2 - val
else:
new_val = abs(1 - val)
return new_val
def generate_noisy_dataframe(dataframe, user_IDs):
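    """Apply randomized_response with p = e^eps / (e^eps + 2) and q = 1 / (e^eps + 2)
    to every user's value for every SNP."""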
p = np.exp(args.epsilon) / (np.exp(args.epsilon) + 2)
q = 1 / (np.exp(args.epsilon) + 2)
for j in range(len(user_IDs)):
dataframe[str(user_IDs[j])] = dataframe.apply(lambda x: randomized_response(x[str(user_IDs[j])], p, q), axis=1)
return dataframe
def compute_relative_error(original_df, noisy_df, SNP_list):
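    """Relative error per SNP between the original and noisy GWAS statistics.

    |original - noisy| / original for the odds ratio and MAF; for the p-value the same
    ratio is taken on the negative natural-log scale. Each error defaults to 1e-7 when
    the corresponding denominator is zero.
    """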
column_names = ['odds_ratio', 'MAF', 'p_val']
RE_df = pd.DataFrame(columns=column_names)
for SNP in SNP_list:
odds_RE = MAF_RE = p_val_RE = 0.0000001
if float(original_df.at[SNP, 'odds_ratio']) != 0:
odds_RE = abs(float(original_df.at[SNP, 'odds_ratio']) - float(noisy_df.at[SNP, 'odds_ratio'])) / float(original_df.at[SNP, 'odds_ratio'])
if float(original_df.at[SNP, 'MAF']) != 0:
MAF_RE = abs(float(original_df.at[SNP, 'MAF']) - float(noisy_df.at[SNP, 'MAF'])) / float(original_df.at[SNP, 'MAF'])
original_log_pval = -1*math.log(float(original_df.at[SNP, 'p_val']))
noisy_log_pval = -1*math.log(float(noisy_df.at[SNP, 'p_val']))
if original_log_pval != 0:
p_val_RE = abs(original_log_pval-noisy_log_pval)/ original_log_pval
RE_to_append = {"odds_ratio": odds_RE, 'MAF': MAF_RE, 'p_val': p_val_RE}
RE_df = RE_df.append(RE_to_append, ignore_index=True)
RE_df.index = SNP_list
return RE_df
def compute_error(D_RE, E_RE, SNP_list):
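    """Relative deviation between the relative errors of datasets D and E for each SNP
    and statistic, again defaulting to 1e-7 when E's relative error is zero."""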
column_names = ['odds_ratio', 'MAF', 'p_val']
error_df = pd.DataFrame(columns=column_names)
for i in range(len(SNP_list)):
odds_error = MAF_error = p_val_error = 0.0000001
if E_RE.iat[i, 0] != 0:
odds_error = abs(float(D_RE.iat[i, 0]) - float(E_RE.iat[i, 0])) / float(E_RE.iat[i, 0])
if E_RE.iat[i, 1] != 0:
MAF_error = abs(float(D_RE.iat[i, 1]) - float(E_RE.iat[i, 1])) / float(E_RE.iat[i, 1])
if E_RE.iat[i, 2] != 0:
p_val_error = abs(float(D_RE.iat[i, 2]) - float(E_RE.iat[i, 2])) / float(E_RE.iat[i, 2])
error_to_append = {"odds_ratio": odds_error, 'MAF': MAF_error, 'p_val': p_val_error}
error_df = error_df.append(error_to_append, ignore_index=True)
error_df.index = SNP_list
return error_df
def check_correctness(error_df):
# Select column odds_ratio from the dataframe
odds_column = error_df["odds_ratio"]
odds_correct = odds_column[odds_column < args.odds_cut_off].count()
odds_incorrect = len(error_df.index) - odds_correct
print("Odds ratio results: " + str(odds_correct) + " are correct and " + str(odds_incorrect) +" are incorrect.")
maf_column = error_df["MAF"]
maf_correct = maf_column[maf_column < args.maf_cut_off].count()
maf_incorrect = len(error_df.index) - maf_correct
print("MAF results: " + str(maf_correct) + " are correct and " + str(maf_incorrect) +" are incorrect.")
pval_column = error_df["p_val"]
pval_correct = pval_column[pval_column < args.pval_cut_off].count()
pval_incorrect = len(error_df.index) - pval_correct
print("p-value results: " + str(pval_correct) + " are correct and " + str(pval_incorrect) +" are incorrect.")
    # Besides printing the summary counts, build a dataframe recording, for each SNP and
    # statistic, whether it was classified as correct or incorrect.
    error_df['odds_ratio_correctness'] = error_df['odds_ratio'] < args.odds_cut_off
    error_df['MAF_correctness'] = error_df['MAF'] < args.maf_cut_off
    error_df['p_val_correctness'] = error_df['p_val'] < args.pval_cut_off
    # .replace(..., inplace=True) on a column subset acts on a copy, so assign the
    # relabelled columns back explicitly.
    correctness_cols = ["odds_ratio_correctness", "MAF_correctness", "p_val_correctness"]
    error_df[correctness_cols] = error_df[correctness_cols].replace({False: "Incorrect", True: "Correct"})
return error_df
if __name__ == "__main__":
# Get case user IDs and control user IDs of dataset D
D_case_IDs, D_control_IDs = get_user_IDs(args.D_case_control_IDs_file)
D_user_IDs = D_case_IDs + D_control_IDs
# Get case user IDs and control user IDs of dataset E
E_case_IDs, E_control_IDs = get_user_IDs(args.E_case_control_IDs_file)
E_user_IDs = E_case_IDs + E_control_IDs
# Load GWAS of dataset D
    D_GWAS = pd.read_csv(args.D_dataset_GWAS_file, sep=',', index_col=0)
from __future__ import print_function
import os
import sys
###########################################################
# Change to your own library path
###########################################################
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
import pytz
# date_time format
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
ema_col = ['id', 'survey_type', 'delivered_ts', 'completed_ts', 'activity', 'location', 'atypical', 'stress',
'stressor_partner', 'stressor_fam', 'stressor_breakdown', 'stressor_money', 'stressor_selfcare', 'stressor_health',
'stressor_otherhealth', 'stressor_household', 'stressor_child', 'stressor_discrimination', 'stressor_none',
'moststressful', 'moststressful_time', 'work_location', 'attend_fidam', 'attend_fidpm', 'attend_hasp',
'attend_pgy1did', 'attend_pgy2did', 'attend_pgy3did', 'attend_none', 'work_start', 'work_end',
'jobperformance', 'jobperformance_best', 'jobsatisfaction', 'sleepquant', 'sleepqual', 'alcoholuse',
'alcohol_total', 'tobaccouse', 'tobacco_total', 'physactivity', 'physactivity_total',
'workstressor_computer', 'workstressor_patientint', 'workstressor_conflict', 'workstressor_census',
'workstressor_late', 'workstressor_paged', 'workstressor_supervise', 'workstressor_admin',
'workstressor_diffcases', 'workstressor_death', 'charting', 'charting_total', 'coworkertrust',
'work_inperson', 'work_digital', 'support_inperson', 'support_digital', 'socialevents', 'hangouts', 'wellness']
pt = pytz.timezone('US/Pacific')
def make_dir(data_path):
if os.path.exists(data_path) is False:
os.mkdir(data_path)
def check_micu_data_valid(data_time, start_date1, end_date1, start_date2, end_date2):
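    """Return True if data_time falls inside the first MICU window (end day inclusive)
    or, when a second window is given, inside that one."""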
cond1 = (pd.to_datetime(data_time) - pd.to_datetime(start_date1)).total_seconds() >= 0
cond2 = (pd.to_datetime(end_date1) + timedelta(days=1) - pd.to_datetime(data_time)).total_seconds() >= 0
cond3 = False
cond4 = False
if start_date2 != 'nan':
cond3 = (pd.to_datetime(data_time) - pd.to_datetime(start_date2)).total_seconds() >= 0
cond4 = (pd.to_datetime(end_date2) + timedelta(days=1) - pd.to_datetime(data_time)).total_seconds() >= 0
if (cond1 and cond2):
return True
elif (cond3 and cond4):
return True
else:
return False
if __name__ == '__main__':
# Read data root path
participant_info_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, 'participant-info'))
saving_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, 'tiles-phase2-opendataset'))
# id,started_ts,completed_ts,duration,weekcompleted,gender,traininglevel
# Phase1Training_IGTB.csv
save_igtb_df = pd.DataFrame()
study_period = pd.read_csv(os.path.join(participant_info_path, 'study-periods.csv'), index_col=0)
ema_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'p2_ema_public_5.21.csv'))
stress_coded_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'MostStressful_SDLS.csv'))
best_coded_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'PerformBest_SDLS.csv'))
atypical_coded_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'Atypical_SDLS.csv'))
participant_list = list(study_period.index)
participant_list.sort()
micu_df = pd.read_csv(os.path.join(participant_info_path, 'p2_micuschedules_public_5.21.csv'), index_col=0)
micu_df = micu_df.dropna(subset=['MICU Start Date 1'])
final_ema_df = pd.DataFrame()
for id in participant_list:
participant_df = ema_df.loc[ema_df['id'] == id]
micu_start1 = pd.to_datetime(micu_df.loc[id, 'MICU Start Date 1']).strftime(date_time_format)[:-3]
micu_end1 = pd.to_datetime(micu_df.loc[id, 'MICU End Date 1']).strftime(date_time_format)[:-3]
micu_start2 = str(micu_df.loc[id, 'MICU Start Date 2'])
micu_end2 = str(micu_df.loc[id, 'MICU End Date 2'])
if 'e7dc' in id:
print()
if str(micu_start2) != 'nan':
micu_start2 = pd.to_datetime(micu_start2).strftime(date_time_format)[:-3]
number_of_days1 = int((pd.to_datetime(micu_end1) - pd.to_datetime(micu_start1)).total_seconds() / (24 * 3600)) + 1
number_of_days2 = int((pd.to_datetime(micu_end2) - pd.to_datetime(micu_start2)).total_seconds() / (24 * 3600))
left_days = 21 - number_of_days1
if left_days:
micu_end2 = (pd.to_datetime(micu_start2) + timedelta(days=left_days)).strftime(date_time_format)[:-3]
else:
                micu_end1 = (pd.to_datetime(micu_start1)
import talib
import pandas as pd
def process_data(eod_data):
# Create a pandas data frame
    data = pd.DataFrame()
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # shouldn't error on scalar data; should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # shouldn't error on scalar data; should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # shouldn't error on scalar data; should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # Labels don't matter which way they are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # Names don't matter which way they are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 format handling
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
        index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
            [0, 0, 0, 0]), np.array([0, 1, 2, 3])])
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
| ujson.decode(broken_json) | pandas._libs.json.decode |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse
import tarfile
from collections import defaultdict
def load_stats(fn, num=300):
tdf, nmidf = defaultdict(list), defaultdict(list)
tar = tarfile.open(fn, 'r:gz')
for member in tar.getmembers():
if not member.name.endswith('stat'):
continue
f = tar.extractfile(member)
name = member.name.rpartition('.')[0]
for line in f:
t, nmi = map(float, line.strip().split())
tdf[name].append(t)
nmidf[name].append(nmi)
missing = num - len(tdf[name])
if missing > 0:
tdf[name] += [np.nan] * missing
nmidf[name] += [np.nan] * missing
return pd.DataFrame(data=tdf), | pd.DataFrame(data=nmidf) | pandas.DataFrame |
import argparse
import json
import logging
import time
from pathlib import Path
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
from timeeval import Algorithm, Status, Datasets, Metric
from timeeval.adapters.docker import SCORES_FILE_NAME as DOCKER_SCORES_FILE_NAME
from timeeval.constants import RESULTS_CSV, HYPER_PARAMETERS, METRICS_CSV, ANOMALY_SCORES_TS
from timeeval.data_types import ExecutionType
from timeeval.experiments import Experiment as TimeEvalExperiment
from timeeval.utils.datasets import load_labels_only
# required to build a lookup-table for algorithm implementations
import timeeval_experiments.algorithms as algorithms
# noinspection PyUnresolvedReferences
from timeeval_experiments.algorithms import *
from timeeval_experiments.baselines import Baselines
INITIAL_WAITING_SECONDS = 5
def path_is_empty(path: Path) -> bool:
return not any(path.iterdir())
class Evaluator:
def __init__(self, results_path: Path, data_path: Path, metrics: List[Metric]):
self._logger = logging.getLogger(self.__class__.__name__)
self.results_path = results_path
self.data_path = data_path
self.metrics = metrics
self.algos = self._build_algorithm_dict()
self.dmgr = Datasets(data_path, create_if_missing=False)
self.df: pd.DataFrame = pd.read_csv(results_path / RESULTS_CSV)
self._logger.warning(f"The Evaluator changes the results folder ({self.results_path}) in-place! "
"If you do not want this, cancel this script using Ctrl-C! "
f"Waiting {INITIAL_WAITING_SECONDS} seconds before continuing ...")
time.sleep(INITIAL_WAITING_SECONDS)
@staticmethod
def _build_algorithm_dict() -> Dict[str, Algorithm]:
algo_names = [a for a in dir(algorithms) if not a.startswith("__")]
algo_list: List[Algorithm] = [eval(f"{a}()") for a in algo_names]
algos: Dict[str, Algorithm] = {}
for a in algo_list:
algos[a.name] = a
# add baselines
increasing_baseline = Baselines.increasing()
algos[increasing_baseline.name] = increasing_baseline
random_baseline = Baselines.random()
algos[random_baseline.name] = random_baseline
normal_baseline = Baselines.normal()
algos[normal_baseline.name] = normal_baseline
# aliases for some renamed algorithms:
algos["Image-embedding-CAE"] = algos["ImageEmbeddingCAE"]
algos["LTSM-VAE"] = algos["LSTM-VAE"]
return algos
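    # Illustrative note (not in the original source): dir(algorithms) lists every
    # name exported by timeeval_experiments.algorithms, and eval(f"{a}()") turns
    # each exported name into an instantiated Algorithm, so later lookups go
    # through the algorithm's display name; the assignments above only add
    # baselines and aliases for results written under older names.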
def evaluate(self, select_index: Optional[Path], evaluate_successful: bool = False):
if select_index is None:
exp_indices = self.df.index.values
else:
exp_indices = pd.read_csv(select_index).iloc[:, 0]
self._logger.info(f"Re-evaluating {len(exp_indices)} experiments from {len(self.df)} experiments of "
f"folder {self.results_path}")
for i in exp_indices:
s_exp: pd.Series = self.df.iloc[i]
if not evaluate_successful and s_exp.status == "Status.OK":
self._logger.info(f"Exp-{i:06d}: Skipping, because experiment was successful.")
continue
self._logger.info(f"Exp-{i:06d}: Starting processing ...")
exp_path = self._exp_path(s_exp)
docker_scores_path = exp_path / DOCKER_SCORES_FILE_NAME
processed_scores_path = exp_path / ANOMALY_SCORES_TS
params_path = exp_path / HYPER_PARAMETERS
metrics_path = exp_path / METRICS_CSV
if not docker_scores_path.exists() or not params_path.exists():
self._logger.error(f"Exp-{i:06d}: Experiment ({s_exp.algorithm}-{s_exp.collection}-{s_exp.dataset}) "
"does not contain any results to start with (scores or hyper params are missing)!")
continue
y_true = load_labels_only(self.dmgr.get_dataset_path((s_exp.collection, s_exp.dataset)))
if not evaluate_successful and processed_scores_path.exists():
self._logger.debug(f"Exp-{i:06d}: Skipping reprocessing of anomaly scores, they are present.")
y_scores = np.genfromtxt(processed_scores_path, delimiter=",")
else:
self._logger.debug(f"Exp-{i:06d}: Processing anomaly scores.")
y_scores = np.genfromtxt(docker_scores_path, delimiter=",")
post_fn = self.algos[s_exp.algorithm].postprocess
if post_fn is not None:
with params_path.open("r") as fh:
hyper_params = json.load(fh)
dataset = self.dmgr.get(s_exp.collection, s_exp.dataset)
args = {
"executionType": ExecutionType.EXECUTE,
"results_path": exp_path,
"hyper_params": hyper_params,
"dataset_details": dataset
}
y_scores = post_fn(y_scores, args)
_, y_scores = TimeEvalExperiment.scale_scores(y_true, y_scores)
self._logger.info(f"Exp-{i:06d}: Writing anomaly scores to {processed_scores_path}.")
y_scores.tofile(str(processed_scores_path), sep="\n")
if not metrics_path.exists():
metric_scores = {}
else:
metric_scores = | pd.read_csv(metrics_path) | pandas.read_csv |
import logging
import re
import numpy as np
import pandas as pd
class CleanStandings:
    '''Clean the scraped league standings and reshape them into a pandas DataFrame.'''
def __init__(self, page:dict):
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
self.logger.info('-----------------------------------------------------------------------------------')
self.logger.info(f'Initializing {__name__}.')
self.standings = page['Standings']
def make_df(self) -> pd.DataFrame:
'''Convert standings to pandas dataframe
'''
standings = self.standings
#first list is headers of table so pop it
standingsHeaders = standings.pop(0)
        #not strictly required, but some standings tables misbehave,
        #especially in the Premier League, so this is a fail-safe to ensure correct header names
#standingsHeaders = ['Pos', 'Team', 'Pld', 'W', 'D' , 'L' , 'GF', 'GA', 'GD', 'Pts']
#clean up standings table to ensure only 10 values remain in list
#last 11th value in list is useless anyway
#standings = [standing[:10]for standing in standings]
#pprint.pprint(standings)
#convert standings to dataframe using above list as headers
df = pd.DataFrame(standings, columns=standingsHeaders)
#start row index with 1 so it can also act as position
#could delete 'Pos' column but keeping it just in case
df.index = df.index + 1
#convert numeric strings to int
#wasted some real time here trying to figure this out
        #a plain '-' won't match because the scraped text uses the Unicode minus sign :/
        #u"\u2212" is the Unicode minus sign
#x = [int('-' + re.sub("\\D", "", i)) if '-' in i else int(re.sub("\\D", "", i)) for i in list(df['GD'])]
for header in list(df.columns.values):
if 'Team' not in header:
df[header] = [int('-' + re.sub("\\D", "", i)) if u"\u2212" in i else int(re.sub("\\D", "", i)) for i in list(df[header])]
self.logger.debug(f'Standings df = {df}')
return df
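def _goal_difference_parsing_sketch():
    # Illustrative sketch (not in the original source): the comprehension above
    # treats the Unicode minus sign (u"\u2212") as the negative marker, so a goal
    # difference scraped as "−7" becomes -7 while "+12" or "12" become 12.
    parse = lambda i: int('-' + re.sub("\\D", "", i)) if u"\u2212" in i else int(re.sub("\\D", "", i))
    assert parse(u"\u22127") == -7
    assert parse("+12") == 12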
class CleanResults:
    '''Clean the scraped results matrix and compute win statistics.'''
def __init__(self, page:dict):
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
self.logger.info('-----------------------------------------------------------------------------------')
self.logger.info(f'Initializing {__name__}.')
#for convenience unpacking it here..
results = page['Results']
self.headers = results['headers']
self.teams = results['teams']
self.results = results['results']
self.totalGames = results['totalGames']
self.homeTeamWins = results['homeTeamWins']
self.awayTeamWins = results['awayTeamWins']
self.draws = results['draws']
def make_win_statistics_df(self) -> pd.DataFrame:
        '''Summarize total games, home wins, away wins and draws in a pandas DataFrame.'''
        #idk why this doesn't work tbh
#wins = [[self.totalGames], [self.homeTeamWins], [self.awayTeamWins], [self.draws]]
#df = pd.DataFrame(data=wins, columns=['TotalGames', 'HomeWins', 'AwayWins', 'Draws'])
        wins = [[self.totalGames], [self.homeTeamWins], [self.awayTeamWins], [self.draws]]
columns=['Total Games', 'Home Wins', 'Away Wins', 'Draws']
df = pd.DataFrame(dict(zip(columns, wins)))
return df
def make_results_df(self):
        '''Parse each team's results row into goals-for/goals-against DataFrames.'''
#lol could have caused major headaches later
#copying because actually new list is needed thus preserving original one
headers = self.teams.copy()
headers.insert(0, 'Goals')
resultsDfList = []
for l in self.results:
#first value in row is team name
row = l[1:]
#re.sub('[\\(\\[].*?[\\)\\]]', '', header.strip())
x = [(int(re.sub('[\\(\\[].*?[\\)\\]]', '', element[0])), int(re.sub('[\\(\\[].*?[\\)\\]]', '', element[2]))) if len(element) == 3 else (np.nan, np.nan) for element in row]
#zip returns iterable
for_ = list(list(zip(*x))[0])
against = list(list(zip(*x))[1])
for_.insert(0, 'GF')
against.insert(0, 'GA')
#print(headers)
#print(for_)
#print(against)
df = | pd.DataFrame([for_, against], columns=headers) | pandas.DataFrame |
# -*- coding: utf-8 -*-
### Import required python modules
from gevent import monkey
monkey.patch_all()
import platform
import os
from os import listdir, stat, makedirs, mkdir, walk, remove, pardir, rename
from os.path import (
isdir,
isfile,
join,
splitext,
getmtime,
basename,
normpath,
exists,
expanduser,
split,
dirname,
getsize,
abspath,
)
import pandas as pd
import time
from time import strftime, localtime
import shutil
from shutil import copy2
from configparser import ConfigParser
import numpy as np
from collections import defaultdict
import subprocess
from websocket import create_connection
import socket
import errno
import re
import gevent
from pennsieve import Pennsieve
from pennsieve.log import get_logger
from pennsieve.api.agent import AgentError, check_port, socket_address, agent_cmd
from urllib.request import urlopen
import json
import collections
from threading import Thread
import pathlib
import io
from contextlib import redirect_stdout
from datetime import datetime, timezone
from validator_soda import (
pathToJsonStruct,
validate_high_level_folder_structure,
validate_high_level_metadata_files,
validate_sub_level_organization,
validate_submission_file,
validate_dataset_description_file,
)
from pysoda import (
clear_queue,
agent_running,
check_forbidden_characters,
check_forbidden_characters_bf,
bf_dataset_size,
)
from organize_datasets import bf_get_dataset_files_folders
### Global variables
curateprogress = " "
curatestatus = " "
curateprintstatus = " "
total_dataset_size = 1
curated_dataset_size = 0
start_time = 0
uploaded_folder_counter = 0
current_size_of_uploaded_files = 0
generated_dataset_id = None
userpath = expanduser("~")
configpath = join(userpath, ".pennsieve", "config.ini")
submitdataprogress = " "
submitdatastatus = " "
submitprintstatus = " "
total_file_size = 1
uploaded_file_size = 0
start_time_bf_upload = 0
start_submit = 0
metadatapath = join(userpath, "SODA", "SODA_metadata")
bf_recognized_file_extensions = [
".cram",
".jp2",
".jpx",
".lsm",
".ndpi",
".nifti",
".oib",
".oif",
".roi",
".rtf",
".swc",
".abf",
".acq",
".adicht",
".adidat",
".aedt",
".afni",
".ai",
".avi",
".bam",
".bash",
".bcl",
".bcl.gz",
".bin",
".brik",
".brukertiff.gz",
".continuous",
".cpp",
".csv",
".curv",
".cxls",
".czi",
".data",
".dcm",
".df",
".dicom",
".doc",
".docx",
".e",
".edf",
".eps",
".events",
".fasta",
".fastq",
".fcs",
".feather",
".fig",
".gif",
".h4",
".h5",
".hdf4",
".hdf5",
".hdr",
".he2",
".he5",
".head",
".hoc",
".htm",
".html",
".ibw",
".img",
".ims",
".ipynb",
".jpeg",
".jpg",
".js",
".json",
".lay",
".lh",
".lif",
".m",
".mat",
".md",
".mef",
".mefd.gz",
".mex",
".mgf",
".mgh",
".mgh.gz",
".mgz",
".mnc",
".moberg.gz",
".mod",
".mov",
".mp4",
".mph",
".mpj",
".mtw",
".ncs",
".nd2",
".nev",
".nex",
".nex5",
".nf3",
".nii",
".nii.gz",
".ns1",
".ns2",
".ns3",
".ns4",
".ns5",
".ns6",
".nwb",
".ogg",
".ogv",
".ome.btf",
".ome.tif",
".ome.tif2",
".ome.tif8",
".ome.tiff",
".ome.xml",
".openephys",
".pdf",
".pgf",
".png",
".ppt",
".pptx",
".ps",
".pul",
".py",
".r",
".raw",
".rdata",
".rh",
".rhd",
".sh",
".sldasm",
".slddrw",
".smr",
".spikes",
".svg",
".svs",
".tab",
".tar",
".tar.gz",
".tcsh",
".tdm",
".tdms",
".text",
".tif",
".tiff",
".tsv",
".txt",
".vcf",
".webm",
".xlsx",
".xml",
".yaml",
".yml",
".zip",
".zsh",
]
bf = ""
myds = ""
initial_bfdataset_size = 0
upload_directly_to_bf = 0
initial_bfdataset_size_submit = 0
forbidden_characters = '<>:"/\|?*'
forbidden_characters_bf = '\/:*?"<>'
# a global that tracks the amount of files that have been uploaded in an upload session;
# is reset once the session ends by success, or failure (is implicitly reset in case of Pennsieve Agent freeze by the user closing SODA)
main_curation_uploaded_files = 0
DEV_TEMPLATE_PATH = join(dirname(__file__), "..", "file_templates")
# once pysoda has been packaged with pyinstaller
# it becomes nested into the pysodadist/api directory
PROD_TEMPLATE_PATH = join(dirname(__file__), "..", "..", "file_templates")
TEMPLATE_PATH = DEV_TEMPLATE_PATH if exists(DEV_TEMPLATE_PATH) else PROD_TEMPLATE_PATH
### Internal functions
def TZLOCAL():
return datetime.now(timezone.utc).astimezone().tzinfo
def open_file(file_path):
"""
Opening folder on all platforms
https://stackoverflow.com/questions/6631299/python-opening-a-folder-in-explorer-nautilus-mac-thingie
Args:
file_path: path of the folder (string)
Action:
Opens file explorer window to the given path
"""
try:
if platform.system() == "Windows":
subprocess.Popen(r"explorer /select," + str(file_path))
elif platform.system() == "Darwin":
subprocess.Popen(["open", file_path])
else:
subprocess.Popen(["xdg-open", file_path])
except Exception as e:
raise e
def folder_size(path):
"""
Provides the size of the folder indicated by path
Args:
path: path of the folder (string)
Returns:
total_size: total size of the folder in bytes (integer)
"""
total_size = 0
start_path = "." # To get size of current directory
for path, dirs, files in walk(path):
for f in files:
fp = join(path, f)
total_size += getsize(fp)
return total_size
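def _folder_size_sketch():
    # Illustrative sketch (not in the original source): folder_size() walks the
    # tree rooted at the given path and sums file sizes in bytes, so a freshly
    # created empty directory reports 0.
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        assert folder_size(tmp) == 0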
def path_size(path):
"""
Returns size of the path, after checking if it's a folder or a file
Args:
path: path of the file/folder (string)
Returns:
total_size: total size of the file/folder in bytes (integer)
"""
if isdir(path):
return folder_size(path)
else:
return getsize(path)
def create_folder_level_manifest(jsonpath, jsondescription):
"""
Function to create manifest files for each SPARC folder.
Files are created in a temporary folder
Args:
datasetpath: path of the dataset (string)
jsonpath: all paths in json format with key being SPARC folder names (dictionary)
jsondescription: description associated with each path (dictionary)
Action:
        Creates manifest files in xlsx format for each SPARC folder
"""
global total_dataset_size
local_timezone = TZLOCAL()
try:
datasetpath = metadatapath
shutil.rmtree(datasetpath) if isdir(datasetpath) else 0
makedirs(datasetpath)
folders = list(jsonpath.keys())
if "main" in folders:
folders.remove("main")
# In each SPARC folder, generate a manifest file
for folder in folders:
if jsonpath[folder] != []:
# Initialize dataframe where manifest info will be stored
df = pd.DataFrame(
columns=[
"filename",
"timestamp",
"description",
"file type",
"Additional Metadata",
]
)
# Get list of files/folders in the the folder
# Remove manifest file from the list if already exists
folderpath = join(datasetpath, folder)
allfiles = jsonpath[folder]
alldescription = jsondescription[folder + "_description"]
manifestexists = join(folderpath, "manifest.xlsx")
countpath = -1
for pathname in allfiles:
countpath += 1
if (
basename(pathname) == "manifest.csv"
or basename(pathname) == "manifest.xlsx"
):
allfiles.pop(countpath)
alldescription.pop(countpath)
# Populate manifest dataframe
filename, timestamp, filetype, filedescription = [], [], [], []
countpath = -1
for paths in allfiles:
if isdir(paths):
key = basename(paths)
alldescription.pop(0)
for subdir, dirs, files in os.walk(paths):
for file in files:
gevent.sleep(0)
filepath = pathlib.Path(paths) / subdir / file
mtime = filepath.stat().st_mtime
lastmodtime = datetime.fromtimestamp(mtime).astimezone(
local_timezone
)
timestamp.append(
lastmodtime.isoformat()
.replace(".", ",")
.replace("+00:00", "Z")
)
full_filename = filepath.name
if folder == "main": # if file in main folder
filename.append(
full_filename
) if folder == "" else filename.append(
join(folder, full_filename)
)
else:
subdirname = os.path.relpath(
subdir, paths
) # gives relative path of the directory of the file w.r.t paths
if subdirname == ".":
filename.append(join(key, full_filename))
else:
filename.append(
join(key, subdirname, full_filename)
)
fileextension = splitext(full_filename)[1]
if (
not fileextension
): # if empty (happens e.g. with Readme files)
fileextension = "None"
filetype.append(fileextension)
filedescription.append("")
else:
gevent.sleep(0)
countpath += 1
filepath = pathlib.Path(paths)
file = filepath.name
filename.append(file)
mtime = filepath.stat().st_mtime
lastmodtime = datetime.fromtimestamp(mtime).astimezone(
local_timezone
)
timestamp.append(
lastmodtime.isoformat()
.replace(".", ",")
.replace("+00:00", "Z")
)
filedescription.append(alldescription[countpath])
if isdir(paths):
filetype.append("folder")
else:
fileextension = splitext(file)[1]
if (
not fileextension
): # if empty (happens e.g. with Readme files)
fileextension = "None"
filetype.append(fileextension)
df["filename"] = filename
df["timestamp"] = timestamp
df["file type"] = filetype
df["description"] = filedescription
makedirs(folderpath)
# Save manifest as Excel sheet
manifestfile = join(folderpath, "manifest.xlsx")
df.to_excel(manifestfile, index=None, header=True)
total_dataset_size += path_size(manifestfile)
jsonpath[folder].append(manifestfile)
return jsonpath
except Exception as e:
raise e
def return_new_path(topath):
"""
This function checks if a folder already exists and in such cases,
appends (1) or (2) etc. to the folder name
Args:
topath: path where the folder is supposed to be created (string)
Returns:
topath: new folder name based on the availability in destination folder (string)
"""
if exists(topath):
i = 1
while True:
if not exists(topath + " (" + str(i) + ")"):
return topath + " (" + str(i) + ")"
i += 1
else:
return topath
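def _return_new_path_sketch():
    # Illustrative sketch (not in the original source): an unused path comes back
    # unchanged, while an existing one gets " (1)", then " (2)", appended.
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        target = join(tmp, "dataset")
        assert return_new_path(target) == target
        makedirs(target)
        assert return_new_path(target) == target + " (1)"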
def return_new_path_replace(topath):
"""
This function checks if a folder already exists and in such cases,
replace the existing folder (this is the opposite situation to the function return_new_path)
Args:
topath: path where the folder is supposed to be created (string)
Returns:
topath: new folder name based on the availability in destination folder (string)
"""
if exists(topath):
i = 1
while True:
if not exists(topath + " (" + str(i) + ")"):
return topath + " (" + str(i) + ")"
i += 1
else:
return topath
def time_format(elapsed_time):
mins, secs = divmod(elapsed_time, 60)
hours, mins = divmod(mins, 60)
return "%dh:%02dmin:%02ds" % (hours, mins, secs)
def mycopyfileobj(fsrc, fdst, length=16 * 1024 * 16):
"""
Helper function to copy file
Args:
fsrc: source file opened in python (file-like object)
fdst: destination file accessed in python (file-like object)
length: copied buffer size in bytes (integer)
"""
global curateprogress
global total_dataset_size
global curated_dataset_size
global main_generated_dataset_size
while True:
buf = fsrc.read(length)
if not buf:
break
gevent.sleep(0)
fdst.write(buf)
curated_dataset_size += len(buf)
main_generated_dataset_size += len(buf)
def mycopyfile_with_metadata(src, dst, *, follow_symlinks=True):
"""
    Copy file src to dst with metadata (timestamp, permission, etc.) preserved
Args:
src: source file (string)
dst: destination file (string)
Returns:
dst
"""
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, "rb") as fsrc:
with open(dst, "wb") as fdst:
mycopyfileobj(fsrc, fdst)
shutil.copystat(src, dst)
return dst
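# Illustrative note (not in the original source): the copy above streams the file
# in 256 KiB chunks via mycopyfileobj() so the module-level progress counters
# advance during the transfer, and shutil.copystat() then carries the source's
# timestamps and permission bits over to the destination.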
### Prepare dataset
# def save_file_organization(jsonpath, jsondescription, jsonpathmetadata, pathsavefileorganization):
# """
# Associated with 'Save' button in the SODA interface
# Saves the paths and associated descriptions from the interface table to a CSV file for future use
# Each json key (SPARC foler name) becomes a header in the CSV
# Args:
# jsonpath: paths of all files (dictionary)
# jsondescription: description associated with each file (dictionary)
# pathsavefileorganization: destination path for CSV file to be saved (string)
# Action:
# Creates CSV file with path and description for files in SPARC folders
# """
# try:
# mydict = jsonpath
# mydict2 = jsondescription
# mydict3 = jsonpathmetadata
# mydict.update(mydict2)
# mydict.update(mydict3)
# dictkeys = list(mydict.keys())
# dictkeys.sort()
# df = pd.DataFrame(columns=[dictkeys[0]])
# df[dictkeys[0]] = mydict[dictkeys[0]]
# for i in range(1,len(dictkeys)):
# dfnew = pd.DataFrame(columns=[dictkeys[i]])
# dfnew[dictkeys[i]] = mydict[dictkeys[i]]
# df = pd.concat([df, dfnew], axis=1)
# df = df.replace(np.nan, '', regex=True)
# csvsavepath = join(pathsavefileorganization)
# df.to_csv(csvsavepath, index = None, header=True)
# return 'Saved!'
# except Exception as e:
# raise e
# def import_file_organization(pathuploadfileorganization, headernames):
# """
# Associated with 'Import' button in the SODA interface
# Import previously saved progress (CSV file) for viewing in the SODA interface
# Args:
# pathuploadfileorganization: path of previously saved CSV file (string)
# headernames: names of SPARC folder (list of strings)
# Returns:
# mydict: dictionary with headers of CSV file as keys and cell contents as list of strings for each key
# """
# try:
# csvsavepath = join(pathuploadfileorganization)
# df = pd.read_csv(csvsavepath)
# dfnan = df.isnull()
# mydict = {}
# mydictmetadata ={}
# dictkeys = df.columns
# compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
# if not compare(dictkeys, headernames):
# raise Exception("Error: Please select a valid file")
# rowcount = len(df.index)
# for i in range(len(dictkeys)):
# pathvect = []
# for j in range(rowcount):
# pathval = df.at[j, dictkeys[i]]
# if not dfnan.at[j, dictkeys[i]]:
# pathvect.append(pathval)
# else:
# pathvect.append("")
# if dictkeys[i] == 'metadata':
# mydictmetadata[dictkeys[i]] = pathvect
# else:
# mydict[dictkeys[i]] = pathvect
# return [mydict, mydictmetadata]
# except Exception as e:
# raise e
# def create_preview_files(paths, folder_path):
# """
# Creates folders and empty files from original 'paths' to the destination 'folder_path'
# Args:
# paths: paths of all the files that need to be copied (list of strings)
# folder_path: Destination to which the files / folders need to be copied (string)
# Action:
# Creates folders and empty files at the given 'folder_path'
# """
# try:
# for p in paths:
# gevent.sleep(0)
# if isfile(p):
# file = basename(p)
# open(join(folder_path, file), 'a').close()
# else:
# all_files = listdir(p)
# all_files_path = []
# for f in all_files:
# all_files_path.append(join(p, f))
# pname = basename(p)
# new_folder_path = join(folder_path, pname)
# makedirs(new_folder_path)
# create_preview_files(all_files_path, new_folder_path)
# return
# except Exception as e:
# raise e
# def preview_file_organization(jsonpath):
# """
# Associated with 'Preview' button in the SODA interface
# Creates a folder for preview and adds mock files from SODA table (same name as origin but 0 kb in size)
# Opens the dialog box to showcase the files / folders added
# Args:
# jsonpath: dictionary containing all paths (keys are SPARC folder names)
# Action:
# Opens the dialog box at preview_path
# Returns:
# preview_path: path of the folder where the preview files are located
# """
# mydict = jsonpath
# preview_path = join(userpath, "SODA", "Preview")
# try:
# if isdir(preview_path):
# delete_preview_file_organization()
# makedirs(preview_path)
# else:
# makedirs(preview_path)
# except Exception as e:
# raise e
# try:
# folderrequired = []
# for i in mydict.keys():
# if mydict[i] != []:
# folderrequired.append(i)
# if i != 'main':
# makedirs(join(preview_path, i))
# def preview_func(folderrequired, preview_path):
# for i in folderrequired:
# paths = mydict[i]
# if (i == 'main'):
# create_preview_files(paths, join(preview_path))
# else:
# create_preview_files(paths, join(preview_path, i))
# output = []
# output.append(gevent.spawn(preview_func, folderrequired, preview_path))
# gevent.sleep(0)
# gevent.joinall(output)
# if len(listdir(preview_path)) > 0:
# folder_in_preview = listdir(preview_path)[0]
# open_file(join(preview_path, folder_in_preview))
# else:
# open_file(preview_path)
# return preview_path
# except Exception as e:
# raise e
# def delete_preview_file_organization():
# """
# Associated with 'Delete Preview Folder' button of the SODA interface
# Action:
# Deletes the 'Preview' folder from the disk
# """
# try:
# userpath = expanduser("~")
# preview_path = join(userpath, "SODA", "Preview")
# if isdir(preview_path):
# shutil.rmtree(preview_path, ignore_errors=True)
# else:
# raise Exception("Error: Preview folder not present or already deleted!")
# except Exception as e:
# raise e
def create_dataset(jsonpath, pathdataset):
"""
Associated with 'Create new dataset locally' option of SODA interface
    for creating the requested folders and files at the specified destination path
Args:
jsonpath: all paths (dictionary, keys are SPARC folder names)
pathdataset: destination path for creating a new dataset as specified (string)
Action:
Creates the folders and files specified
"""
global curateprogress
try:
mydict = jsonpath
folderrequired = []
# create SPARC folder structure
for i in mydict.keys():
if mydict[i] != []:
folderrequired.append(i)
if i != "main":
makedirs(join(pathdataset, i))
# create all subfolders and generate a list of all files to copy
listallfiles = []
for i in folderrequired:
if i == "main":
outputpath = pathdataset
else:
outputpath = join(pathdataset, i)
for tablepath in mydict[i]:
if isdir(tablepath):
foldername = basename(tablepath)
outputpathdir = join(outputpath, foldername)
if not os.path.isdir(outputpathdir):
os.mkdir(outputpathdir)
for dirpath, dirnames, filenames in os.walk(tablepath):
distdir = os.path.join(
outputpathdir, os.path.relpath(dirpath, tablepath)
)
if not os.path.isdir(distdir):
os.mkdir(distdir)
for file in filenames:
srcfile = os.path.join(dirpath, file)
distfile = os.path.join(distdir, file)
listallfiles.append([srcfile, distfile])
else:
srcfile = tablepath
file = basename(tablepath)
distfile = os.path.join(outputpath, file)
listallfiles.append([srcfile, distfile])
# copy all files to corresponding folders
for fileinfo in listallfiles:
srcfile = fileinfo[0]
distfile = fileinfo[1]
curateprogress = "Copying " + str(srcfile)
mycopyfile_with_metadata(srcfile, distfile)
except Exception as e:
raise e
def bf_get_current_user_permission(bf, myds):
"""
Function to get the permission of currently logged in user for a selected dataset
Args:
        bf: logged-in Pennsieve account (dict)
myds: selected Pennsieve dataset (dict)
Output:
permission of current user (string)
"""
try:
selected_dataset_id = myds.id
user_role = bf._api._get("/datasets/" + str(selected_dataset_id) + "/role")[
"role"
]
return user_role
except Exception as e:
raise e
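# Illustrative note (not in the original source): the /datasets/<id>/role endpoint
# returns the caller's permission as a plain string (for example "owner",
# "manager", "editor", or "viewer"); the exact set of role names is an assumption
# here and should be checked against the Pennsieve API documentation.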
### Validate dataset
def validate_dataset(validator_input):
try:
if type(validator_input) is str:
jsonStruct = pathToJsonStruct(validator_input)
elif type(validator_input) is dict:
jsonStruct = validator_input
else:
raise Exception(
"Error: validator input must be string (path to dataset) or a SODA JSON Structure/Python dictionary"
)
res = []
validatorHighLevelFolder = validate_high_level_folder_structure(jsonStruct)
validatorObj = validatorHighLevelFolder
resitem = {}
resitem["pass"] = validatorObj.passes
resitem["warnings"] = validatorObj.warnings
resitem["fatal"] = validatorObj.fatal
res.append(resitem)
(
validatorHighLevelMetadataFiles,
isSubmission,
isDatasetDescription,
isSubjects,
isSamples,
) = validate_high_level_metadata_files(jsonStruct)
validatorObj = validatorHighLevelMetadataFiles
resitem = {}
resitem["pass"] = validatorObj.passes
resitem["warnings"] = validatorObj.warnings
resitem["fatal"] = validatorObj.fatal
res.append(resitem)
validatorSubLevelOrganization = validate_sub_level_organization(jsonStruct)
validatorObj = validatorSubLevelOrganization
resitem = {}
resitem["pass"] = validatorObj.passes
resitem["warnings"] = validatorObj.warnings
resitem["fatal"] = validatorObj.fatal
res.append(resitem)
if isSubmission == 1:
metadataFiles = jsonStruct["main"]
for f in metadataFiles:
fullName = os.path.basename(f)
if os.path.splitext(fullName)[0] == "submission":
subFilePath = f
validatorSubmissionFile = validate_submission_file(subFilePath)
validatorObj = validatorSubmissionFile
resitem = {}
resitem["pass"] = validatorObj.passes
resitem["warnings"] = validatorObj.warnings
resitem["fatal"] = validatorObj.fatal
res.append(resitem)
elif isSubmission == 0:
resitem = {}
resitem["warnings"] = [
"Include a 'submission' file in a valid format to check it through the validator"
]
res.append(resitem)
elif isSubmission > 1:
resitem = {}
resitem["warnings"] = [
"Include a unique 'submission' file to check it through the validator"
]
res.append(resitem)
if isDatasetDescription == 1:
metadataFiles = jsonStruct["main"]
for f in metadataFiles:
fullName = os.path.basename(f)
if os.path.splitext(fullName)[0] == "dataset_description":
ddFilePath = f
validatorDatasetDescriptionFile = validate_dataset_description_file(
ddFilePath
)
validatorObj = validatorDatasetDescriptionFile
resitem = {}
resitem["pass"] = validatorObj.passes
resitem["warnings"] = validatorObj.warnings
resitem["fatal"] = validatorObj.fatal
res.append(resitem)
elif isDatasetDescription == 0:
resitem = {}
resitem["warnings"] = [
"Include a 'dataset_description' file in a valid format to check it through the validator"
]
res.append(resitem)
elif isDatasetDescription > 1:
resitem = {}
resitem["warnings"] = [
"Include a unique 'dataset_description' file to check it through the validator"
]
res.append(resitem)
return res
except Exception as e:
raise e
"""
------------------------------------------
NEW
FUNCTIONS
------------------------------------------
"""
def bf_dataset_size():
"""
Function to get storage size of a dataset on Pennsieve
"""
global bf
global myds
try:
selected_dataset_id = myds.id
bf_response = bf._api._get("/datasets/" + str(selected_dataset_id))
return bf_response["storage"] if "storage" in bf_response.keys() else 0
except Exception as e:
raise e
def check_empty_files_folders(soda_json_structure):
"""
Function to check for empty files and folders
Args:
soda_json_structure: soda dict with information about all specified files and folders
Output:
error: error message with list of non valid local data files, if any
"""
try:
def recursive_empty_files_check(my_folder, my_relative_path, error_files):
for folder_key, folder in my_folder["folders"].items():
relative_path = my_relative_path + "/" + folder_key
error_files = recursive_empty_files_check(
folder, relative_path, error_files
)
for file_key in list(my_folder["files"].keys()):
file = my_folder["files"][file_key]
file_type = file["type"]
if file_type == "local":
file_path = file["path"]
if isfile(file_path):
file_size = getsize(file_path)
if file_size == 0:
del my_folder["files"][file_key]
relative_path = my_relative_path + "/" + file_key
error_message = relative_path + " (path: " + file_path + ")"
error_files.append(error_message)
return error_files
def recursive_empty_local_folders_check(
my_folder,
my_folder_key,
my_folders_content,
my_relative_path,
error_folders,
):
folders_content = my_folder["folders"]
for folder_key in list(my_folder["folders"].keys()):
folder = my_folder["folders"][folder_key]
relative_path = my_relative_path + "/" + folder_key
error_folders = recursive_empty_local_folders_check(
folder, folder_key, folders_content, relative_path, error_folders
)
if not my_folder["folders"]:
if not my_folder["files"]:
ignore = False
if "type" in my_folder:
if my_folder["type"] == "bf":
ignore = True
if ignore == False:
error_message = my_relative_path
error_folders.append(error_message)
del my_folders_content[my_folder_key]
return error_folders
error_files = []
error_folders = []
if "dataset-structure" in soda_json_structure.keys():
dataset_structure = soda_json_structure["dataset-structure"]
if "folders" in dataset_structure:
for folder_key, folder in dataset_structure["folders"].items():
relative_path = folder_key
error_files = recursive_empty_files_check(
folder, relative_path, error_files
)
folders_content = dataset_structure["folders"]
for folder_key in list(dataset_structure["folders"].keys()):
folder = dataset_structure["folders"][folder_key]
relative_path = folder_key
error_folders = recursive_empty_local_folders_check(
folder,
folder_key,
folders_content,
relative_path,
error_folders,
)
if "metadata-files" in soda_json_structure.keys():
metadata_files = soda_json_structure["metadata-files"]
for file_key in list(metadata_files.keys()):
file = metadata_files[file_key]
file_type = file["type"]
if file_type == "local":
file_path = file["path"]
if isfile(file_path):
file_size = getsize(file_path)
if file_size == 0:
del metadata_files[file_key]
error_message = file_key + " (path: " + file_path + ")"
error_files.append(error_message)
if not metadata_files:
del soda_json_structure["metadata-files"]
if len(error_files) > 0:
error_message = [
"The following local file(s) is/are empty (0 kb) and will be ignored."
]
error_files = error_message + [] + error_files
if len(error_folders) > 0:
error_message = [
"The following folder(s) is/are empty or only contain(s) empty file(s), and will be ignored."
]
error_folders = error_message + [] + error_folders
return [error_files, error_folders, soda_json_structure]
except Exception as e:
raise e
def check_local_dataset_files_validity(soda_json_structure):
"""
Function to check that the local data files and folders specified in the dataset are valid
Args:
soda_json_structure: soda dict with information about all specified files and folders
Output:
error: error message with list of non valid local data files, if any
"""
def recursive_local_file_check(my_folder, my_relative_path, error):
for folder_key, folder in my_folder["folders"].items():
relative_path = my_relative_path + "/" + folder_key
error = recursive_local_file_check(folder, relative_path, error)
for file_key in list(my_folder["files"].keys()):
file = my_folder["files"][file_key]
file_type = file["type"]
if file_type == "local":
file_path = file["path"]
if file["type"] == "bf":
continue
if not isfile(file_path):
relative_path = my_relative_path + "/" + file_key
error_message = relative_path + " (path: " + file_path + ")"
error.append(error_message)
else:
file_size = getsize(file_path)
if file_size == 0:
del my_folder["files"][file_key]
return error
def recursive_empty_local_folder_remove(
my_folder, my_folder_key, my_folders_content
):
folders_content = my_folder["folders"]
for folder_key in list(my_folder["folders"].keys()):
folder = my_folder["folders"][folder_key]
recursive_empty_local_folder_remove(folder, folder_key, folders_content)
if not my_folder["folders"]:
if not my_folder["files"]:
if my_folder["type"] != "bf":
del my_folders_content[my_folder_key]
error = []
if "dataset-structure" in soda_json_structure.keys():
dataset_structure = soda_json_structure["dataset-structure"]
if "folders" in dataset_structure:
for folder_key, folder in dataset_structure["folders"].items():
relative_path = folder_key
error = recursive_local_file_check(folder, relative_path, error)
folders_content = dataset_structure["folders"]
for folder_key in list(dataset_structure["folders"].keys()):
folder = dataset_structure["folders"][folder_key]
recursive_empty_local_folder_remove(folder, folder_key, folders_content)
if "metadata-files" in soda_json_structure.keys():
metadata_files = soda_json_structure["metadata-files"]
for file_key in list(metadata_files.keys()):
file = metadata_files[file_key]
file_type = file["type"]
if file_type == "local":
file_path = file["path"]
if not isfile(file_path):
error_message = file_key + " (path: " + file_path + ")"
error.append(error_message)
else:
file_size = getsize(file_path)
if file_size == 0:
del metadata_files[file_key]
if not metadata_files:
del soda_json_structure["metadata-files"]
if len(error) > 0:
error_message = [
"Error: The following local files were not found. Specify them again or remove them."
]
error = error_message + error
return error
# path to local SODA folder for saving manifest files
manifest_sparc = ["manifest.xlsx", "manifest.csv"]
manifest_folder_path = join(userpath, "SODA", "manifest_files")
def create_high_level_manifest_files(soda_json_structure):
"""
Function to create manifest files for each high-level SPARC folder.
Args:
soda_json_structure: soda dict with information about the dataset to be generated/modified
    Output:
manifest_files_structure: dict including the local path of the manifest files
"""
double_extensions = [
".ome.tiff",
".ome.tif",
".ome.tf2,",
".ome.tf8",
".ome.btf",
".ome.xml",
".brukertiff.gz",
".mefd.gz",
".moberg.gz",
".nii.gz",
".mgh.gz",
".tar.gz",
".bcl.gz",
]
try:
def get_name_extension(file_name):
double_ext = False
for ext in double_extensions:
if file_name.find(ext) != -1:
double_ext = True
break
ext = ""
name = ""
if double_ext == False:
name = os.path.splitext(file_name)[0]
ext = os.path.splitext(file_name)[1]
else:
ext = (
os.path.splitext(os.path.splitext(file_name)[0])[1]
+ os.path.splitext(file_name)[1]
)
name = os.path.splitext(os.path.splitext(file_name)[0])[0]
return name, ext
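        # Illustrative note (not in the original source): names on the
        # double-extension list keep their compound suffix, e.g.
        # "scan.ome.tiff" -> ("scan", ".ome.tiff"), while a plain
        # "notes.txt" -> ("notes", ".txt").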
def recursive_manifest_builder(
my_folder, my_relative_path, dict_folder_manifest
):
if "files" in my_folder.keys():
for file_key, file in my_folder["files"].items():
gevent.sleep(0)
dict_folder_manifest = file_manifest_entry(
file_key, file, my_relative_path, dict_folder_manifest
)
if "folders" in my_folder.keys():
for folder_key, folder in my_folder["folders"].items():
if my_relative_path:
relative_path = my_relative_path + "/" + folder_key
else:
relative_path = folder_key
dict_folder_manifest = recursive_manifest_builder(
folder, relative_path, dict_folder_manifest
)
return dict_folder_manifest
def file_manifest_entry(file_key, file, relative_path, dict_folder_manifest):
# filename
if relative_path:
filename = relative_path + "/" + file_key
else:
filename = file_key
dict_folder_manifest["filename"].append(filename)
# timestamp
file_type = file["type"]
if file_type == "local":
file_path = file["path"]
filepath = pathlib.Path(file_path)
mtime = filepath.stat().st_mtime
lastmodtime = datetime.fromtimestamp(mtime).astimezone(local_timezone)
dict_folder_manifest["timestamp"].append(
lastmodtime.isoformat().replace(".", ",").replace("+00:00", "Z")
)
elif file_type == "bf":
dict_folder_manifest["timestamp"].append(file["timestamp"])
# description
if "description" in file.keys():
dict_folder_manifest["description"].append(file["description"])
else:
dict_folder_manifest["description"].append("")
# file type
fileextension = ""
name_split = splitext(file_key)
if name_split[1] == "":
fileextension = "None"
else:
unused_file_name, fileextension = get_name_extension(file_key)
# fileextension = name_split[1]
dict_folder_manifest["file type"].append(fileextension)
            # additional metadata
if "additional-metadata" in file.keys():
dict_folder_manifest["Additional Metadata"].append(
file["additional-metadata"]
)
else:
dict_folder_manifest["Additional Metadata"].append("")
return dict_folder_manifest
        # create local folder to save manifest files temporarily (delete any existing one first)
shutil.rmtree(manifest_folder_path) if isdir(manifest_folder_path) else 0
makedirs(manifest_folder_path)
dataset_structure = soda_json_structure["dataset-structure"]
local_timezone = TZLOCAL()
manifest_files_structure = {}
for folder_key, folder in dataset_structure["folders"].items():
# Initialize dict where manifest info will be stored
dict_folder_manifest = {}
dict_folder_manifest["filename"] = []
dict_folder_manifest["timestamp"] = []
dict_folder_manifest["description"] = []
dict_folder_manifest["file type"] = []
dict_folder_manifest["Additional Metadata"] = []
relative_path = ""
dict_folder_manifest = recursive_manifest_builder(
folder, relative_path, dict_folder_manifest
)
# create high-level folder at the temporary location
folderpath = join(manifest_folder_path, folder_key)
makedirs(folderpath)
# save manifest file
manifestfilepath = join(folderpath, "manifest.xlsx")
df = | pd.DataFrame.from_dict(dict_folder_manifest) | pandas.DataFrame.from_dict |
#%%
import os
import itertools
import cloudpickle
import re
import glob
import statsmodels.api as sm
import git
# Our numerical workhorses
import numpy as np
import pandas as pd
import scipy as sp
# Import library to perform maximum entropy fits
from maxentropy.skmaxent import FeatureTransformer, MinDivergenceModel
# Import libraries to parallelize processes
from joblib import Parallel, delayed
# Import the project utils
import ccutils
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define directories for data and figure
datadir = f'{homedir}/data/csv_maxEnt_dist/'
#%%
# Load moments for multi-promoter level
df_constraints = pd.read_csv(
f'{datadir}MaxEnt_multi_prom_constraints.csv'
)
print('reading distribution moments')
# Remove the zeroth moment column
df_constraints = df_constraints.drop(labels="m0p0", axis=1)
# %%
print('Finding multiplicative factor for noise')
# Read moments for multi-promoter model
df_mom_rep = pd.read_csv(datadir + 'MaxEnt_multi_prom_constraints.csv')
# Read experimental determination of noise
df_noise = pd.read_csv(f'{homedir}/data/csv_microscopy/' +
'microscopy_noise_bootstrap.csv')
# Find the mean unregulated levels to compute the fold-change
mean_m_delta = np.mean(df_mom_rep[df_mom_rep.repressor == 0].m1p0)
mean_p_delta = np.mean(df_mom_rep[df_mom_rep.repressor == 0].m0p1)
# Compute the noise for the multi-promoter data
df_mom_rep = df_mom_rep.assign(
m_noise=(
np.sqrt(df_mom_rep.m2p0 - df_mom_rep.m1p0 ** 2) / df_mom_rep.m1p0
),
p_noise=(
np.sqrt(df_mom_rep.m0p2 - df_mom_rep.m0p1 ** 2) / df_mom_rep.m0p1
),
m_fold_change=df_mom_rep.m1p0 / mean_m_delta,
p_fold_change=df_mom_rep.m0p1 / mean_p_delta,
)
# Initialize list to save theoretical noise
thry_noise = list()
# Iterate through rows
for idx, row in df_noise.iterrows():
# Extract information
rep = float(row.repressor)
op = row.operator
if np.isnan(row.IPTG_uM):
iptg = 0
else:
iptg = row.IPTG_uM
# Extract equivalent theoretical prediction
thry = df_mom_rep[(df_mom_rep.repressor == rep) &
(df_mom_rep.operator == op) &
(df_mom_rep.inducer_uM == iptg)].p_noise
# Append to list
thry_noise.append(thry.iloc[0])
df_noise = df_noise.assign(noise_theory = thry_noise)
# Linear regression to find multiplicative factor
# Extract fold-change
fc = df_noise.fold_change.values
# Set values for ∆lacI to be fold-change 1
fc[np.isnan(fc)] = 1
# Normalize weights
weights = fc / fc.sum()
# Declare linear regression model
wls_model = sm.WLS(df_noise.noise.values,
df_noise.noise_theory.values,
weights=weights)
# Fit parameter
results = wls_model.fit()
noise_factor = results.params[0]
# %%
print('Increasing noise')
# Compute variance
p_var = df_constraints['m0p2'] - df_constraints['m0p1']**2
# Update second moment
df_constraints['m0p2'] = (noise_factor**2) * df_constraints['m0p2'] - \
(noise_factor**2 - 1) * df_constraints['m0p1']**2
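# Illustrative derivation (not in the original source): scaling the standard
# deviation by the fitted factor k = noise_factor turns the variance
# (m0p2 - m0p1**2) into k**2 * (m0p2 - m0p1**2); adding the unchanged squared
# mean back gives the update applied above:
#   m0p2' = k**2 * m0p2 - (k**2 - 1) * m0p1**2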
# %%
print('Finding multiplicative factor for skewness')
# Read moments for multi-promoter model
df_mom_rep = pd.read_csv(datadir + 'MaxEnt_multi_prom_constraints.csv')
# Read experimental determination of noise
df_noise = pd.read_csv(f'{homedir}/data/csv_microscopy/' +
'microscopy_noise_bootstrap.csv')
# Find the mean unregulated levels to compute the fold-change
mean_m_delta = np.mean(df_mom_rep[df_mom_rep.repressor == 0].m1p0)
mean_p_delta = np.mean(df_mom_rep[df_mom_rep.repressor == 0].m0p1)
# Compute the skewness for the multi-promoter data
m_mean = df_mom_rep.m1p0
p_mean = df_mom_rep.m0p1
m_var = df_mom_rep.m2p0 - df_mom_rep.m1p0 ** 2
p_var = df_mom_rep.m0p2 - df_mom_rep.m0p1 ** 2
df_mom_rep = df_mom_rep.assign(
m_skew=(df_mom_rep.m3p0 - 3 * m_mean * m_var - m_mean**3)
/ m_var**(3 / 2),
p_skew=(df_mom_rep.m0p3 - 3 * p_mean * p_var - p_mean**3)
/ p_var**(3 / 2),
)
# Initialize list to save theoretical noise
thry_skew = list()
# Iterate through rows
for idx, row in df_noise.iterrows():
# Extract information
rep = float(row.repressor)
op = row.operator
if np.isnan(row.IPTG_uM):
iptg = 0
else:
iptg = row.IPTG_uM
# Extract equivalent theoretical prediction
thry = df_mom_rep[(df_mom_rep.repressor == rep) &
(df_mom_rep.operator == op) &
(df_mom_rep.inducer_uM == iptg)].p_skew
# Append to list
thry_skew.append(thry.iloc[0])
df_noise = df_noise.assign(skew_theory = thry_skew)
# Extract fold-change
fc = df_noise.fold_change.values
# Set values for ∆lacI to be fold-change 1
fc[np.isnan(fc)] = 1
# Normalize weights
weights = fc / fc.sum()
# Declare linear regression model
wls_model = sm.WLS(df_noise.skewness.values,
df_noise.skew_theory.values,
weights=weights)
# Fit parameter
results = wls_model.fit()
skew_factor = results.params[0]
# Update third moment
print('Increasing skewness')
df_constraints['m0p3'] = (8 * skew_factor * df_constraints['m0p3'] - \
(24 * skew_factor - 12) * df_constraints['m0p1'] * p_var - \
(8 * skew_factor - 1) * df_constraints['m0p1']**3)
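# Illustrative note (not in the original source): with the third central moment
# mu3 = m0p3 - 3*m0p1*var - m0p1**3, the constants 8, (24*s - 12) and (8*s - 1)
# above follow from requiring mu3' = s * skew * (2*sigma)**3, i.e. the update
# appears to assume the standard deviation has already been doubled; whether
# noise_factor is in fact 2 is an assumption not checked here.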
#%%
# Extract protein moments in constraints
prot_mom = [x for x in df_constraints.columns if "m0" in x]
# Define index of moments to be used in the computation
moments = [tuple(map(int, re.findall(r"\d+", s))) for s in prot_mom][0:3]
print(f'moments to be used for inference: {moments}')
# Define sample space
mRNA_space = np.array([0]) # Dummy space
protein_space = np.arange(0, 10e4)
# Generate sample space as a list of pairs using itertools.
samplespace = list(itertools.product(mRNA_space, protein_space))
# Initialize matrix to save all the features that are fed to the
# maxentropy function
features = np.zeros([len(moments), len(samplespace)])
# Loop through constraints and compute features
for i, mom in enumerate(moments):
features[i, :] = [ccutils.maxent.feature_fn(x, mom) for x in samplespace]
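# Note (added): each feature is assumed to be the monomial m**i * p**j evaluated at
# a sample x = (m, p), so that the MaxEnt constraints are the moments <m^i p^j>.
# A minimal stand-in for ccutils.maxent.feature_fn under that assumption would be:
# def feature_fn(x, exponents):
#     return x[0] ** exponents[0] * x[1] ** exponents[1]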
#%%
# Initialize data frame to save the lagrange multipliers.
names = ["operator", "binding_energy", "repressor", "inducer_uM"]
# Add names of the constraints
names = names + ["lambda_m" + str(m[0]) + "p" + str(m[1]) for m in moments]
# Initialize empty dataframe
df_maxEnt = pd.DataFrame([], columns=names)
# Define column names containing the constraints used to fit the distribution
constraints_names = ["m" + str(m[0]) + "p" + str(m[1]) for m in moments]
# Define function for parallel computation
def maxEnt_parallel(idx, df):
# Report on progress
print("iteration: ", idx)
# Extract constraints
constraints = df.loc[constraints_names]
# Perform MaxEnt computation
# We use the Powell method because despite being slower it is more
# robust than the other implementations.
Lagrange = ccutils.maxent.MaxEnt_bretthorst(
constraints,
features,
algorithm="Powell",
tol=1e-5,
paramtol=1e-5,
maxiter=10000,
)
# Save Lagrange multipliers into dataframe
series = pd.Series(Lagrange, index=names[4::])
# Add other features to series before appending to dataframe
series = pd.concat([df.drop(constraints_names), series])
return series
# Run the function in parallel
maxEnt_series = Parallel(n_jobs=6)(
delayed(maxEnt_parallel)(idx, df)
for idx, df in df_constraints.iterrows()
)
# Initialize data frame to save list of parameters
df_maxEnt = | pd.DataFrame([], columns=names) | pandas.DataFrame |
#!/usr/bin/env python
# ROS imports
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
# Custom ROS imports
from av_msgs.msg import Mode, States
from prius_msgs.msg import Control
# Python imports
import cv2
import pandas as pd
import os
import time
class Collector:
def __init__(self):
# Front camera
self.front_camera_topic_name = "/prius/front_camera/image_raw"
self.front_camera_subscriber = rospy.Subscriber(self.front_camera_topic_name, data_class=Image,
callback=self.front_camera_image_callback, queue_size=1,
buff_size=2 ** 24)
# Prius mode
self.prius_mode_topic = "/prius/mode"
self.prius_mode_sub = rospy.Subscriber(self.prius_mode_topic, Mode, self.prius_mode_callback)
self.collect = False
# Prius state
self.prius_states_topic = "/prius/states"
self.prius_state_sub = rospy.Subscriber(self.prius_states_topic, States, self.prius_state_callback)
self.velocity = 0 # km/h
self.steer_angle = 0.0 # TODO: radians? or degrees?
# Prius callback
self.prius_topic = "/prius"
self.prius_sub = rospy.Subscriber(self.prius_topic, Control, self.prius_callback)
self.steer = 0.0
# Cv Bridge
self.cv_bridge = CvBridge()
# Pandas dataframe with labels
self.df_labels = | pd.DataFrame(data={"img_name": [], "velocity": [], "steer_angle": [], "steer": []}) | pandas.DataFrame |
import pickle
from typing import Any, Dict, Iterable
import numpy as np
import pandas as pd
from numpy.lib.function_base import iterable
from pandas.api.types import CategoricalDtype
from pandas.core.groupby import DataFrameGroupBy
from scipy.sparse import hstack
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import OneHotEncoder
from spacy.language import Language
from spacy.strings import StringStore
from spacy.tokens import Doc, Span
from .functional import _convert_series_to_array, _get_label
class EndLinesModel:
"""Model to classify if an end line is a real one or it should be a space.
Parameters
----------
nlp : Language
spaCy nlp pipeline to use for matching.
"""
def __init__(self, nlp: Language):
self.nlp = nlp
def _preprocess_data(self, corpus: Iterable[Doc]) -> pd.DataFrame:
"""
Parameters
----------
corpus : Iterable[Doc]
Corpus of documents
Returns
-------
pd.DataFrame
Preprocessed data
"""
# Extract the vocabulary
string_store = self.nlp.vocab.strings
# Iterate in the corpus and construct a dataframe
train_data_list = []
for i, doc in enumerate(corpus):
train_data_list.append(self._get_attributes(doc, i))
df = pd.concat(train_data_list)
df.reset_index(inplace=True, drop=False)
df.rename(columns={"ORTH": "A1", "index": "original_token_index"}, inplace=True)
# Retrieve string representation of token_id and shape
df["TEXT"] = df.A1.apply(self._get_string, string_store=string_store)
df["SHAPE_"] = df.SHAPE.apply(self._get_string, string_store=string_store)
# Convert new lines as an attribute instead of a row
df = self._convert_line_to_attribute(df, expr="\n", col="END_LINE")
df = self._convert_line_to_attribute(df, expr="\n\n", col="BLANK_LINE")
df = df.loc[~(df.END_LINE | df.BLANK_LINE)]
df = df.drop(columns="END_LINE")
df = df.drop(columns="BLANK_LINE")
df.rename(
columns={"TEMP_END_LINE": "END_LINE", "TEMP_BLANK_LINE": "BLANK_LINE"},
inplace=True,
)
# Construct A2 by shifting
df = self._shift_col(df, "A1", "A2", direction="backward")
# Compute A3 and A4
df = self._compute_a3(df)
df = self._shift_col(df, "A3", "A4", direction="backward")
# SPACE is the class to predict. Set 1 if not an END_LINE
df["SPACE"] = np.logical_not(df["END_LINE"]).astype("int")
df[["END_LINE", "BLANK_LINE"]] = df[["END_LINE", "BLANK_LINE"]].fillna(
True, inplace=False
)
# Assign a sentence id to each token
df = df.groupby("DOC_ID").apply(self._retrieve_lines)
df["SENTENCE_ID"] = df["SENTENCE_ID"].astype("int")
# Compute B1 and B2
df = self._compute_B(df)
# Drop Tokens without info (last token of doc)
df.dropna(subset=["A1", "A2", "A3", "A4"], inplace=True)
# Export the vocabularies to be able to use the model with another corpus
voc_a3a4 = self._create_vocabulary(df.A3_.cat.categories)
voc_B2 = self._create_vocabulary(df.cv_bin.cat.categories)
voc_B1 = self._create_vocabulary(df.l_norm_bin.cat.categories)
vocabulary = {"A3A4": voc_a3a4, "B1": voc_B1, "B2": voc_B2}
self.vocabulary = vocabulary
return df
def fit_and_predict(self, corpus: Iterable[Doc]) -> pd.DataFrame:
"""Fit the model and predict for the training data
Parameters
----------
corpus : Iterable[Doc]
An iterable of Documents
Returns
-------
pd.DataFrame
one line by end_line prediction
"""
# Preprocess data to have a pd DF
df = self._preprocess_data(corpus)
# Train and predict M1
self._fit_M1(df.A1, df.A2, df.A3, df.A4, df.SPACE)
outputs_M1 = self._predict_M1(
df.A1,
df.A2,
df.A3,
df.A4,
)
df["M1"] = outputs_M1["predictions"]
df["M1_proba"] = outputs_M1["predictions_proba"]
# Force Blank lines to 0
df.loc[df.BLANK_LINE, "M1"] = 0
# Train and predict M2
df_endlines = df.loc[df.END_LINE]
self._fit_M2(B1=df_endlines.B1, B2=df_endlines.B2, label=df_endlines.M1)
outputs_M2 = self._predict_M2(B1=df_endlines.B1, B2=df_endlines.B2)
df.loc[df.END_LINE, "M2"] = outputs_M2["predictions"]
df.loc[df.END_LINE, "M2_proba"] = outputs_M2["predictions_proba"]
df["M2"] = df["M2"].astype(
| pd.Int64Dtype() | pandas.Int64Dtype |
import os
import pandas as pd
import numpy as np
import pyddem.tdem_tools as tt
in_ext = '/home/atom/ongoing/work_worldwide/tables/table_man_gard_zemp_wout.csv'
df_ext = | pd.read_csv(in_ext) | pandas.read_csv |
import pytest
from math import isclose
from numpy import log10
from pandas import Series
from cellengine.utils.scale_utils import apply_scale
@pytest.fixture(scope="module")
def scale():
return {"minimum": 5, "maximum": 10, "type": "LogScale"}
def test_should_apply_scale(scale):
# fmt: off
input = Series([
-20, 0, 1e-40, 0.01, 0.2, 0.5, 0.9999, 1, 1.00001,
2, 5, 10, 100, 250, 500, 1000, 5000, 10000, 50000,
5e5, 5e6, 5e7, 5e8, 5e9, 5e10, 5e11, 5e12, 5e13, 5e14,
5e15, 5e16, 5e17
])
# fmt: on
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, False))
# fmt: off
expected = Series([
0, 0, 0, 0, 0, 0, 0, 0, 0.00000434292310445319, 0.30102999566398114,
0.6989700043360186, 1, 2, 2.397940008672037, 2.6989700043360183, 3,
3.6989700043360187, 4, 4.698970004336018, 5.698970004336018,
6.698970004336018, 7.698970004336018, 8.698970004336018,
9.698970004336018, 10.698970004336018, 11.698970004336018,
12.698970004336018, 13.698970004336018, 14.698970004336018,
15.698970004336018, 16.698970004336018, 17.698970004336018,
])
# fmt: on
    assert all(isclose(a, b, rel_tol=0.00001) for a, b in zip(output, expected))
def test_should_apply_clamped(scale):
# fmt: off
input = Series([
-20, 0, 0.01, 0.2, 0.5, 1, 2, 5, 10,
100, 250, 500, 1000, 5000, 10000, 50000
])
# fmt: on
output = Series([], dtype="float64")
MINV = 0.6989700043360186
MAXV = 1
output = input.map(lambda a: apply_scale(scale, a, True))
# fmt: off
expected = Series([
MINV, MINV, MINV, MINV, MINV, MINV, MINV,
0.6989700043360186, 1, MAXV, MAXV, MAXV,
MAXV, MAXV, MAXV, MAXV,
])
# fmt: on
    assert all(isclose(a, b, rel_tol=0.00001) for a, b in zip(output, expected))
def test_should_handle_0_length_arrays(scale):
input = Series([], dtype="float64")
output = Series([], dtype="float64")
output = | Series([], dtype="float64") | pandas.Series |
"""
Class Features
Name: lib_data_io_nc
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Libraries
import logging
import os
import netCDF4
import time
import re
import warnings
import numpy as np
import xarray as xr
import pandas as pd
from copy import deepcopy
from hmc.algorithm.io.lib_data_io_generic import reshape_var3d, create_darray_3d, create_darray_2d
from hmc.algorithm.default.lib_default_args import logger_name, time_units, time_calendar, time_format_algorithm
from hmc.algorithm.utils.lib_utils_system import create_folder
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to write collections
def write_collections(file_name, file_data, file_time, file_attrs=None):
time_date_list = []
time_str_list = []
for time_stamp_step in file_time:
time_str_step = time_stamp_step.strftime(format=time_format_algorithm)
time_str_list.append(time_str_step)
time_date_step = time_stamp_step.to_pydatetime()
time_date_list.append(time_date_step)
# File operation(s)
file_handle = netCDF4.Dataset(file_name, 'w')
file_handle.createDimension('time', len(file_time))
# File attribute(s)
if file_attrs is not None:
for attr_key, attr_value in file_attrs.items():
file_handle.setncattr(attr_key, attr_value)
# Time information
file_time_num = file_handle.createVariable(varname='time', dimensions=('time',), datatype='float32')
file_time_num[:] = netCDF4.date2num(time_date_list, units=time_units, calendar=time_calendar)
file_time_str = file_handle.createVariable(varname='times', dimensions=('time',), datatype='str')
file_time_str[:] = np.array(time_str_list, dtype=object)
# Add file creation date
file_handle.file_date = 'Created ' + time.ctime(time.time())
for file_key, file_dict in file_data.items():
file_values = list(file_dict.values())
if isinstance(file_values[0], str):
file_data = np.array(file_values, dtype=object)
file_var = file_handle.createVariable(varname=file_key, dimensions=('time',), datatype='str')
elif isinstance(file_values[0], (int, float)):
file_data = file_values
file_var = file_handle.createVariable(varname=file_key, dimensions=('time',), datatype='f4')
else:
log_stream.error(' ===> Variable format in collections is not allowed!')
raise IOError('Bad format of array')
file_var[:] = file_data
file_handle.close()
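# Example usage (illustrative sketch only; the file name, variable names and
# attribute values below are assumptions, not taken from the actual workflow):
# time_index = pd.date_range('2020-04-01', periods=2, freq='H')
# write_collections('hmc.collections.nc',
#                   {'discharge': {'time_01': 1.2, 'time_02': 1.5}},
#                   time_index,
#                   file_attrs={'domain': 'test'})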
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read data
def read_data(file_name_list, var_name=None, var_time_start=None, var_time_end=None, var_time_freq='H',
coord_name_time='time', coord_name_geo_x='Longitude', coord_name_geo_y='Latitude',
dim_name_time='time', dim_name_geo_x='west_east', dim_name_geo_y='south_north'):
# File n
file_n = file_name_list.__len__()
if var_name is None:
log_stream.error(' ===> Variable name is undefined!')
raise IOError('Variable name is a mandatory argument!')
else:
if isinstance(var_name, list):
var_name = var_name[0]
file_check_list = []
for file_name_step in file_name_list:
if os.path.exists(file_name_step):
file_check_list.append(True)
else:
file_check_list.append(False)
file_check = any(el for el in file_check_list)
# Open datasets
if file_check:
if file_n == 1:
datetime_tmp = pd.date_range(start=var_time_start, end=var_time_end, freq=var_time_freq)
datetime_idx_select = pd.DatetimeIndex(datetime_tmp)
if os.path.exists(file_name_list[0]):
try:
dst_tmp = xr.open_dataset(file_name_list[0])
if ('time' not in list(dst_tmp.coords)) and ('time' not in list(dst_tmp.dims)):
log_stream.warning(
' ===> Time dimensions and coordinates are not included in filename \n "' +
file_name_list[0] + '". \n Time dimensions and coordinates will be assigned '
'using the first step of the reference time period "' +
str(datetime_idx_select[0]) + '".\n')
datetime_idx_tmp = pd.DatetimeIndex([datetime_idx_select[0]])
dst_tmp['time'] = datetime_idx_tmp
dst_tmp = dst_tmp.set_coords('time')
if 'time' not in list(dst_tmp.dims):
dst_tmp = dst_tmp.expand_dims('time')
# Check the time steps of datasets and expected and in case of nan's, fill with nearest values
if dst_tmp['time'].__len__() > 1:
datetime_idx_dst_tmp = | pd.DatetimeIndex(dst_tmp['time'].values) | pandas.DatetimeIndex |
# -*- coding:utf-8 -*-
import os
import json
from flask import send_from_directory
from datetime import timedelta
from dateutil.relativedelta import relativedelta
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
app = dash.Dash()
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
# dateparse = lambda dates : pd.datetime(dates, '%Y%m%dT%H:%M')
df = | pd.read_csv('./data/result.csv') | pandas.read_csv |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
        # Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
        # Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
| tm.assert_frame_equal(store["df"], df) | pandas.util.testing.assert_frame_equal |
from losses import SeparableMTLoss
from regularizations import Regularization
from . import Fit, ConvergenceError
import numpy as np
import pandas as pd
from time import time
from joblib import Parallel, delayed
def _ridge(loss, rho, beta, d, b, tr):
return loss.ridge(1. / rho, beta - d, b, threshold=tr)
class ConsensusADMM(Fit):
def __init__(
self,
loss: SeparableMTLoss,
reg: Regularization,
**kwargs
):
super().__init__(loss, reg, **kwargs)
def _solve(self, beta: np.ndarray, lam: float, l: int):
"""
Parameters
----------
beta : array-like
The initial parameter value. (p, K)
lam : float
The regularization parameter.
Returns
-------
beta : array-like
The final estimate. (p, K)
Raises
------
ConvergenceError
If the solver does not reach appropriate convergence.
"""
b, d, t = self._initialize(beta, lam)
while True:
t += 1
# update
t0 = time()
beta, d, r, n_ridge = self._update(b, beta, d, lam)
dt = time() - t0
# logging
loss, original_obj = self._log(b, beta, lam, l, r, t, n_ridge, dt)
# norms for convergence
eps_dual, eps_primal, r_norm, s_norm = self._compute_convergence_checks(b, beta, d, r)
# convergence checks
if r_norm < eps_primal and s_norm < eps_dual:
break
if t > self.max_iter:
print(self.log_solve)
raise ConvergenceError("maximum number of iteration reached.")
if self.verbose > 1:
print(self.log_solve)
return beta, t, loss, original_obj
def _update(self, b, beta, d, lam):
# update B
n_ridge = np.array([0 for _ in self.loss])
tr = np.sqrt(self.loss.data.n_features * self.loss.data.n_tasks) * self.eps_abs
if self.parallel:
out = Parallel(n_jobs=len(self.loss), prefer="threads")(
delayed(_ridge)(loss, self.rho, beta[:, [k]], d[:, [k]], b[:, [k]], tr)
for k, loss in enumerate(self.loss.values())
)
for k, (bk, n) in enumerate(out):
b[:, [k]], n_ridge[k] = bk, n
else:
for k, (task, loss) in enumerate(self.loss.items()):
b[:, [k]], n_ridge[k] = loss.ridge(
1. / self.rho,
beta[:, [k]] - d[:, [k]],
b[:, [k]],
threshold=np.sqrt(self.loss.data.n_features * self.loss.data.n_tasks) * self.eps_abs
)
# update beta
beta = self.reg.proximal(b + d, lam / self.rho)
# update r and d
r = b - beta
d += r
return beta, d, r, n_ridge.mean()
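    # Added note on _update above: it is the standard scaled-form consensus ADMM sweep,
    #   b_k  <- argmin_b loss_k(b) + (rho/2) * ||b - (beta_k - d_k)||^2   (per-task ridge step)
    #   beta <- prox_{(lam/rho) * reg}(b + d)                             (shared consensus step)
    #   d    <- d + (b - beta)                                            (scaled dual update)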
def _log(self, b, beta, lam, l, r, t, n_ridge, dt):
loss, augmented_obj, original_obj = self._compute_obj(b, beta, lam, r)
self.log_solve = self.log_solve.append(
pd.DataFrame({
"l": [l], "t": [t], "loss": loss,
"original obj.": [original_obj], "augmented obj.": [augmented_obj],
"status": ["ADMM iteration"], "n_grad": n_ridge, "n_prox": 1, "time": dt
}),
ignore_index=True
)
return loss, original_obj
def _initialize(self, beta, lam):
b = beta
r = b - beta
d = np.zeros_like(b)
t = 0
loss, augmented_obj, original_obj = self._compute_obj(b, beta, lam, r)
self.log_solve = self.log_solve.append( | pd.DataFrame({
"l": [0], "t": [t], "loss": loss, "original obj.": [original_obj], "time": 0.,
"augmented obj.": [augmented_obj], "status": ["initial"], "n_grad": [0], "n_prox": [0]}
) | pandas.DataFrame |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
load two csvs and merge them
Input:
meesages_filepath(str): file path of 1st csv file
categories_filepath(str): file path of 2nd csv file
Output:
df(dataframe): merged dataframe
"""
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
"""
FyleExtractConnector(): Connection between Fyle and Database
"""
import logging
from os import path
from typing import List
import pandas as pd
class FyleExtractConnector:
"""
- Extract Data from Fyle and load to Database
"""
def __init__(self, fyle_sdk_connection, dbconn):
self.__dbconn = dbconn
self.__connection = fyle_sdk_connection
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info('Fyle connection established')
def create_tables(self):
"""
Creates DB tables
:return: None
"""
basepath = path.dirname(__file__)
ddl_path = path.join(basepath, 'extract_ddl.sql')
ddl_sql = open(ddl_path, 'r').read()
self.__dbconn.executescript(ddl_sql)
def extract_settlements(self, updated_at: List['str'] = None, exported: bool = None) -> List[str]:
"""
Extract settlements from Fyle
:param updated_at: Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern.
:param exported: True for exported settlements and False for unexported settlements
:return: List of settlement ids
"""
self.logger.info('Extracting settlements from Fyle.')
settlements = self.__connection.Settlements.get_all(updated_at=updated_at, exported=exported)
df_settlements = pd.DataFrame(settlements)
self.logger.info('%s settlements extracted.', str(len(df_settlements.index)))
if settlements:
df_settlements = df_settlements[[
'id', 'created_at', 'updated_at', 'opening_date', 'closing_date',
'employee_id', 'employee_email', 'employee_code', 'creator_employee_id',
'creator_employee_email', 'creator_employee_code', 'org_id', 'org_name',
'exported'
]]
df_settlements.to_sql('fyle_extract_settlements', self.__dbconn, if_exists='append', index=False)
return df_settlements['id'].to_list()
return []
def extract_employees(self) -> List[str]:
"""
Extract employees from Fyle
:return: List of employee ids
"""
self.logger.info('Extracting employees from Fyle.')
employees = self.__connection.Employees.get_all()
self.logger.info('%s employees extracted.', str(len(employees)))
if employees:
df_employees = pd.DataFrame(employees)
df_employees = df_employees[[
'id', 'created_at', 'updated_at', 'employee_email', 'employee_code',
'full_name', 'joining_date', 'location', 'level_id', 'level',
'business_unit', 'department_id', 'department', 'sub_department',
'approver1_email', 'approver2_email', 'approver3_email', 'title',
'branch_ifsc', 'branch_account', 'mobile', 'delegatee_email',
'default_cost_center_name', 'disabled', 'org_id', 'org_name'
]]
df_employees.to_sql('fyle_extract_employees', self.__dbconn, if_exists='append', index=False)
return df_employees['id'].to_list()
return []
def extract_expenses(self, settlement_ids: List[str] = None, state: List[str] = None,
fund_source: List[str] = None, reimbursable: bool = None, updated_at: List[str] = None,
exported: bool = None) -> List[str]:
"""
Extract expenses from Fyle
:param updated_at: Extract expenses in exported_at date range
:param exported: True for exported expenses and False for unexported expenses
:param settlement_ids: List of settlement_ids
:param state: List of expense states
:param fund_source: List of expense fund_sources
:param reimbursable: True for reimbursable expenses, False for non reimbursable expenses
:return: List of expense ids
"""
self.logger.info('Extracting expenses from Fyle.')
expenses = self.__connection.Expenses.get_all(
settlement_id=settlement_ids,
state=state,
updated_at=updated_at,
fund_source=fund_source,
exported=exported
)
if reimbursable is not None:
expenses = list(filter(lambda expense: expense['reimbursable'], expenses))
self.logger.info('%s expenses extracted.', str(len(expenses)))
if expenses:
df_expenses = pd.DataFrame(expenses)
df_expenses['approved_by'] = df_expenses['approved_by'].map(lambda expense: expense[0] if expense else None)
df_expenses = df_expenses[[
'id', 'employee_id', 'employee_email', 'employee_code', 'spent_at', 'currency',
'amount', 'foreign_currency', 'foreign_amount', 'purpose', 'project_id', 'project_name',
'cost_center_id', 'cost_center_name', 'category_id', 'category_code', 'category_name',
'sub_category', 'settlement_id', 'expense_number', 'claim_number', 'trip_request_id',
'state', 'report_id', 'fund_source', 'reimbursable', 'created_at', 'updated_at',
'approved_at', 'settled_at', 'verified', 'verified_at', 'reimbursed_at', 'added_to_report_at',
'report_submitted_at', 'vendor', 'has_attachments', 'billable', 'exported',
'approved_by', 'org_id', 'org_name', 'created_by'
]]
df_expenses.to_sql('fyle_extract_expenses', self.__dbconn, if_exists='append', index=False)
return df_expenses['id'].to_list()
return []
def extract_attachments(self, expense_ids: List[str]) -> List[str]:
"""
Extract attachments from Fyle
:param expense_ids: List of Expense Ids
:return: List of expense ids for which attachments were downloaded
"""
attachments = []
self.logger.info('Extracting attachments from Fyle')
if expense_ids:
for expense_id in expense_ids:
attachment = self.__connection.Expenses.get_attachments(expense_id)
if attachment['data']:
attachment = attachment['data'][0]
attachment['expense_id'] = expense_id
attachments.append(attachment)
self.logger.info('%s attachments extracted.', str(len(attachments)))
if attachments:
df_attachments = | pd.DataFrame(attachments) | pandas.DataFrame |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import scipy.stats
import functools
import pandas as pd
import numpy as np
import pymc3
import os
from scratch import log_likelihood
from scratch import metropolis
run_scratch = True
save_scratch_trace = True
run_pymc3 = False
# Make a straight line
y = [500, 10]
x = np.linspace(0, 20)
y_obs = np.array(y[0] + y[1]*x, dtype=int)
y_obs_noise = np.random.poisson(y_obs)
# General MCMC parameters
niter = 30000
nburn = 1000
if run_scratch:
####################################
########### From scratch ###########
####################################
print('Running linear regression from scratch example.')
# set up MCMC
prior = [scipy.stats.uniform(250, 600),
scipy.stats.uniform(0, 20)]
start = [prior_i.rvs() for prior_i in prior]
def target(parameters, x, y_obs):
""" Calculates the product of the likelihood and prior """
y = parameters[0] + parameters[1]*x
l = log_likelihood.log_poisson_likelihood(y, y_obs)
p = np.sum([np.log(prior_i.pdf(p)) for prior_i, p in zip(prior, parameters)])
return l + p
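# Added note (not in the original script): `target` returns the unnormalised
# log-posterior, log-likelihood + log-prior. Working in log space lets a
# log-Metropolis sampler accept a proposal p' with probability
# min(1, exp(target(p') - target(p))), so the normalising constant of the
# posterior never needs to be computed -- it cancels in the ratio.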
def proposal(parameters, proposal_jump=[10, 0.5]):
"""
Generate a new proposal, or "guess" for the MCMC to try next.
    Each new proposed value is drawn from a Normal centered on the
    current parameter value. The proposal_jump array specifies the
    standard deviation of the possible jumps away from the current
    parameter values.
"""
new_vals = np.array([scipy.stats.norm(loc=p_i, scale=jump_i).rvs()
for p_i, jump_i in zip(parameters, proposal_jump)])
return new_vals
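# Illustrative call (commented out; the numbers are example values only):
# proposal([500.0, 10.0])  # e.g. array([503.2, 9.7]); a fresh random draw each call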
target_line = functools.partial(target, x=x, y_obs=y_obs_noise)
# Run the MCMC sampler
scratch_chain = metropolis.log_metroplis(start, target_line, proposal,
niter, nburn=nburn)
if save_scratch_trace:
if not os.path.exists('./data/'): # Check if data directory exists.
os.makedirs('./data/')
print('Made a ./data/ directory')
df = | pd.DataFrame(scratch_chain, columns=['y0', 'y1']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
from sklearn import tree
from sklearn import neighbors
from sklearn import ensemble
from sklearn import svm
from sklearn import gaussian_process
from sklearn import naive_bayes
from sklearn import neural_network
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
testset = | pd.read_csv("../input/test.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def create_intervals(df: pd.DataFrame, column: str, n_intervals: int):
# Create the bins
array_percentile = np.linspace(0, 1, n_intervals + 1)
list_bins = list(df[[column]].quantile(array_percentile)[column])
list_bins[0], list_bins[-1] = -np.inf, +np.inf
# Create some value names
new_column_name = column + "_cat"
no_value_name = 'No ' + column
# Adjust people with the correct interval
df[new_column_name] = no_value_name
df.loc[df[column].notnull(), new_column_name] = pd.cut(
df[column].loc[df[column].notnull()], bins=list_bins)
def create_df_freq(serie: pd.Series):
list_words = []
for string in serie:
list_words += string.split()
serie_freq = pd.value_counts(np.array(list_words))
return | pd.DataFrame(serie_freq) | pandas.DataFrame |
from logging import getLogger, Formatter, StreamHandler, INFO
import subprocess
import time
import gzip
import tqdm
import click
import pandas as pd
import numpy as np
from util import timer
import ensloader
import loader
import euclidsearch
logger = getLogger('landmark')
@click.group()
def cli():
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
logger.setLevel(INFO)
logger.addHandler(handler)
@cli.command()
def stg2ens_0602w1_delfdba_top1000():
fn_dba = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank_topk1000.h5'
fn_npy = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank_topk1000.npy'
with timer('Load ensemble descriptors'):
ds = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval')
ds_trn = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval_dbatrain')
with timer('DELF-DBAQE'):
# alpha DBA(index+test)
ds = delfdbaqe(ds, ds_trn, qe_topk=5, thresh=90)
loader.save_index_dataset(fn_dba, ds)
with timer('Generate submission file'):
euclidsearch.gpux4_euclidsearch_from_dataset(ds, fn_npy, topk=1000)
@cli.command()
def stg2ens_0602w1_delfdba_top300():
fn_dba = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank_topk300.h5'
fn_npy = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank_topk300.npy'
with timer('Load ensemble descriptors'):
ds = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval')
ds_trn = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval_dbatrain')
with timer('DELF-DBAQE'):
# alpha DBA(index+test)
ds = delfdbaqe(ds, ds_trn, qe_topk=5, thresh=90)
loader.save_index_dataset(fn_dba, ds)
with timer('Generate submission file'):
euclidsearch.gpux4_euclidsearch_from_dataset(ds, fn_npy, topk=300)
@cli.command()
def stg2ens_0602w1_delfdba70():
"""
3349/3349
225861/225861
"""
fn_dba = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe70_norerank.h5'
fn_npy = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe70_norerank.npy'
with timer('Load ensemble descriptors'):
ds = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval')
ds_trn = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval_dbatrain')
with timer('DELF-DBAQE'):
# alpha DBA(index+test)
ds = delfdbaqe(ds, ds_trn, qe_topk=5, thresh=70)
loader.save_index_dataset(fn_dba, ds)
with timer('Generate submission file'):
euclidsearch.gpux4_euclidsearch_from_dataset(ds, fn_npy)
@cli.command()
def stg2ens_0602w1_delfdba120():
"""
"""
fn_dba = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe120_norerank.h5'
fn_npy = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe120_norerank.npy'
with timer('Load ensemble descriptors'):
ds = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval')
ds_trn = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval_dbatrain')
with timer('DELF-DBAQE'):
# alpha DBA(index+test)
ds = delfdbaqe(ds, ds_trn, qe_topk=5, thresh=120)
loader.save_index_dataset(fn_dba, ds)
with timer('Generate submission file'):
euclidsearch.gpux4_euclidsearch_from_dataset(ds, fn_npy)
@cli.command()
def stg2ens_0602w1_delfdba():
fn_dba = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank.h5'
fn_npy = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank.npy'
with timer('Load ensemble descriptors'):
ds = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval')
ds_trn = ensloader.load_desc('modelset_v0602',
'modelset_v0602_weight_v1',
mode='retrieval_dbatrain')
with timer('DELF-DBAQE'):
# alpha DBA(index+test)
ds = delfdbaqe(ds, ds_trn, qe_topk=5, thresh=90)
loader.save_index_dataset(fn_dba, ds)
with timer('Generate submission file'):
euclidsearch.gpux4_euclidsearch_from_dataset(ds, fn_npy)
def delfdbaqe(ds, ds_trn, qe_topk=5, thresh=90):
print(ds.ids_test.shape, ds.ids_index.shape)
test_id2idx_map = {id_: idx for idx, id_ in enumerate(ds.ids_test)}
index_id2idx_map = {id_: idx for idx, id_ in enumerate(ds.ids_index)}
train_id2idx_map = {id_: idx for idx, id_ in enumerate(ds_trn.ids_train)}
df1 = pd.read_csv('data/working/test19_train19_search_top100_RANSAC.csv')
df1 = df1[df1.inliers_count >= thresh]
df2 = pd.read_csv('data/working/test19_index19_search_top100_RANSAC.csv')
df2 = df2[df2.inliers_count >= thresh]
df = | pd.concat([df1, df2], sort=False) | pandas.concat |
# coding: utf-8
# In[ ]:
import csv
import math
import numpy as np
import pandas
import sys
import warnings
from collections import Counter
from csv import reader
from sklearn import tree
from sklearn import decomposition
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings("ignore")
def load_csv(fileName):
file = open(fileName, "r")
lines = reader(file)
dataset = list(lines)
return dataset
def stringColumnToFloat(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
def projection_simplex(v, z=1):
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
class MulticlassSVM(BaseEstimator, ClassifierMixin):
def __init__(self, C=1, max_iter=50, tol=0.05, random_state=None, verbose=0):
self.C = C
self.max_iter = max_iter
        self.tol = tol
self.random_state = random_state
self.verbose = verbose
def _partial_gradient(self, X, y, i):
# Partial gradient for the ith sample.
g = np.dot(X[i], self.coef_.T) + 1
g[y[i]] -= 1
return g
def _violation(self, g, y, i):
# Optimality violation for the ith sample.
smallest = np.inf
for k in range(g.shape[0]):
if k == y[i] and self.dual_coef_[k, i] >= self.C:
continue
elif k != y[i] and self.dual_coef_[k, i] >= 0:
continue
smallest = min(smallest, g[k])
return g.max() - smallest
def _solve_subproblem(self, g, y, norms, i):
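        # Solves the dual subproblem restricted to sample i in closed form: a
        # gradient step is projected onto a scaled simplex (see projection_simplex)
        # and the result is converted back into a change of sample i's dual column.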
Ci = np.zeros(g.shape[0])
Ci[y[i]] = self.C
beta_hat = norms[i] * (Ci - self.dual_coef_[:, i]) + g / norms[i]
z = self.C * norms[i]
beta = projection_simplex(beta_hat, z)
return Ci - self.dual_coef_[:, i] - beta / norms[i]
def fit(self, X, y):
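        # Dual block-coordinate ascent for a Crammer-Singer style multiclass SVM:
        # samples are visited in a fixed shuffled order; for each sample the
        # optimality violation is measured and, when non-negligible, the per-sample
        # subproblem is solved exactly and both the dual variables and the primal
        # weights (self.coef_) are updated. Training stops after max_iter sweeps or
        # once the summed violation drops below `tol` relative to the first sweep.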
n_samples, n_features = X.shape
self._label_encoder = LabelEncoder()
y = self._label_encoder.fit_transform(y)
n_classes = len(self._label_encoder.classes_)
self.dual_coef_ = np.zeros((n_classes, n_samples), dtype=np.float64)
self.coef_ = np.zeros((n_classes, n_features))
norms = np.sqrt(np.sum(X ** 2, axis=1))
rs = check_random_state(self.random_state)
ind = np.arange(n_samples)
rs.shuffle(ind)
violation_init = None
for it in range(self.max_iter):
violation_sum = 0
for ii in range(n_samples):
i = ind[ii]
if norms[i] == 0:
continue
g = self._partial_gradient(X, y, i)
v = self._violation(g, y, i)
violation_sum += v
if v < 1e-12:
continue
delta = self._solve_subproblem(g, y, norms, i)
self.coef_ += (delta * X[i][:, np.newaxis]).T
self.dual_coef_[:, i] += delta
if it == 0:
violation_init = violation_sum
vratio = violation_sum / violation_init
if vratio < self.tol:
break
return self
def predict(self, X):
decision = np.dot(X, self.coef_.T)
pred = decision.argmax(axis=1)
# print(pred)
return pred
def calculateTimeStampWeight(row, min_timestamp, max_timestamp):
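    # Recency weight in (0, 1], at day resolution: a tag applied at min_timestamp
    # gets the smallest weight, a tag applied at max_timestamp gets weight 1.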
return ((pandas.to_datetime(row['timestamp'])-min_timestamp).days + 1)/((max_timestamp-min_timestamp).days+1)
def TFIDFProductValue(row):
return row['tf']*row['idf']
def CalculateMovieTF(row):
return row['tag_weightage'] / row['total_movie_weightage']
def calculateIDFData(row, total_movies):
return math.log10(total_movies / row['count_of_movies'])
def calculateTFIDFData(tfdata, idfdata):
tfidfdata = tfdata.merge(idfdata, on='tagid')
tfidfdata['tfidf'] = tfidfdata.apply(TFIDFProductValue, axis=1)
return tfidfdata[['movieid','tagid','tfidf']]
def fetchMoviesTagsData():
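    # Builds a TF-IDF representation of movies over tags:
    #   tag_weightage = sum of recency weights of a tag on a movie
    #   tf            = tag_weightage / total tag weightage of the movie
    #   idf           = log10(total number of movies / number of movies with the tag)
    # and returns one (movieid, tagid, tfidf) row per movie-tag pair.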
allmoviesTagsData =pandas.read_csv("data/mltags.csv")
min_timestamp = pandas.to_datetime(min(allmoviesTagsData['timestamp']))
max_timestamp = pandas.to_datetime(max(allmoviesTagsData['timestamp']))
allmoviesTagsData['timestamp_weightage'] = allmoviesTagsData.apply(calculateTimeStampWeight, axis=1, args=(min_timestamp, max_timestamp))
allmoviesTagsData['tag_weightage'] = allmoviesTagsData.groupby(['movieid','tagid'])['timestamp_weightage'].transform('sum')
allmoviesTagsData = allmoviesTagsData[['movieid','tagid','tag_weightage']].drop_duplicates(subset=['movieid','tagid'])
allmoviesTagsData['total_movie_weightage'] = allmoviesTagsData.groupby(['movieid'])['tag_weightage'].transform('sum')
allmoviesTagsData['tf'] = allmoviesTagsData.apply(CalculateMovieTF, axis=1)
taglist = allmoviesTagsData['tagid'].tolist()
alltagsdata = pandas.read_csv("data/mltags.csv")
specifictagsdata = alltagsdata[alltagsdata['tagid'].isin(taglist)]
specifictagsdata.drop_duplicates(subset=['tagid', 'movieid'], inplace=True)
specifictagsdata['count_of_movies'] = specifictagsdata.groupby('tagid')['movieid'].transform('count')
specifictagsdata.drop_duplicates(subset=['tagid'], inplace=True)
moviesdata = pandas.read_csv("data/mlmovies.csv")
total_movies = moviesdata.shape[0]
specifictagsdata['idf'] = specifictagsdata.apply(calculateIDFData, axis=1, total_movies=total_movies)
tfidfdata = calculateTFIDFData(allmoviesTagsData, specifictagsdata[['tagid', 'idf']])
return tfidfdata
def fetchMoviesDetails(movielist):
moviedetails = | pandas.read_csv("data/mlmovies.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import glob
import os
from pathlib import Path
import itertools
import datetime as dt
from datetime import timedelta
from multiprocessing import Pool
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import cophenet
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore")
import findata.utils as fdutils
class FeatureEngineer():
def __init__(self, read_dir='data/raw', write_dir='data/processed'):
self.read_dir = os.path.join(read_dir, '')
self.write_dir = os.path.join(write_dir, '')
Path(self.write_dir).mkdir(parents=True, exist_ok=True)
self.logger = fdutils.new_logger('FeatureEngineer')
def set_params(self,
start_date='2011-04-11', end_date=None,
keep_duplicate='last',
filters={'avg_20d_pricevol':1000000, 'n_records':120},
price_lags=[20],
change_depth=2, change_lags=[5, 10, 20],
price_features=['atr', 'zscore', 'pricevol', 'vol', 'vwap', 'demark',
'dreturn', 'chaikin', 'corr', 'std'],
drop_cols=['price', 'open', 'high', 'low', 'close', 'dividends',
'symbol', 'volume', 'vol.', 'adj close', 'change %'],
index_etf='SPY', price_variables=['close', 'open'], profit_variable='price'):
self.start_date = dt.datetime.strptime(start_date, '%Y-%m-%d')
self.end_date = dt.datetime.now() if end_date is None else dt.datetime.strptime(end_date, '%Y-%m-%d')
if self.start_date.month==self.end_date.month and \
self.start_date.day==self.end_date.day and self.start_date.year==self.end_date.year:
raise ValueError('Start and end date are the same!')
self.keep_duplicate = keep_duplicate
self.filters = filters
self.price_features = price_features
self.price_lags = price_lags
self.change_depth = change_depth
self.change_lags = change_lags
self.drop_cols = drop_cols
self.index_etf = index_etf
self.price_variables = price_variables if isinstance(price_variables, list) else [price_variables]
self.profit_variable = profit_variable
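    # Hedged usage sketch (only set_params/read_file/create_price_features are
    # shown in this excerpt, so the exact end-to-end pipeline is an assumption):
    #
    #   fe = FeatureEngineer(read_dir='data/raw', write_dir='data/processed')
    #   fe.set_params(start_date='2015-01-01', price_lags=[10, 20], index_etf='SPY')
    #   df = fe.read_file('data/raw/XYZ.csv')      # per-symbol raw prices
    #   df = fe.create_price_features(df)          # add rolling indicators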
def passes_filters(self, dff):
PASS = True
for filter in self.filters.keys():
try:
if 'avg_20d_pricevol' in self.filters.keys():
val = (dff['close']*dff['volume']).rolling(20).mean()
PASS = PASS & (val.values[-1] >= self.filters['avg_20d_pricevol'])
if 'n_records' in self.filters.keys():
PASS = PASS & (len(dff) >= self.filters['n_records'])
except Exception as e:
self.logger.debug(f'Was not able to apply filter {filter}:\n' + str(e))
return PASS
def format_cols(self, dff):
dff.columns = [col.lower() for col in dff.columns]
dff['date'] = pd.to_datetime(dff['date'])
dff = dff.set_index('date')
dff = dff.loc[~dff.index.duplicated(keep=self.keep_duplicate)]
return dff
def clean_cols(self, dff, f, keep=[]):
dff = self.drop_columns(dff)
dff.columns = [os.path.split(f)[1].replace('.csv','') + '_' + col for col in dff.columns]
return dff
def drop_columns(self, dff, keep=[]):
return dff.drop(columns=list(set(self.drop_cols) - set(keep)), errors='ignore')
def filter_dates(self, dff):
dff = dff.loc[(dff.index>=self.start_date) & (dff.index<=self.end_date)]
# Need to filter out dates with lots of NaN indicators (e.g. holiday in US but not in Canada --
# nothing's going to happen those days anyway), without filtering out a bunch of dates just because
# a couple ETFs didn't exist yet or something
perc_nas = dff.isnull().mean(axis=1)
dff = dff.loc[perc_nas<0.90]
return dff
def create_price_features(self, dff, index_dff=None, price_features=None):
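        # For every configured price variable and lag this adds rolling/technical
        # columns to `dff`: lagged returns, rolling z-score and std, average
        # price*volume, normalised volume, a true-range column ('atr'), a rolling
        # volume-weighted price-change sum (labelled 'vwap'), DeMark- and
        # Chaikin-style oscillators, and -- when `index_dff` is supplied -- the
        # rolling correlation of daily returns against the index ETF.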
price_features = price_features or self.price_features
try:
for price_variable in self.price_variables:
if price_variable not in dff.columns:
continue
util_cols = pd.DataFrame(index=dff.index)
util_cols['ma20d'] = dff[price_variable].rolling(20).mean()
util_cols['high52wk'] = dff['high'].rolling(250).max()
util_cols['low52wk'] = dff['low'].rolling(250).min()
util_cols['high20d'] = dff['high'].rolling(20).max()
util_cols['low20d'] = dff['low'].rolling(20).min()
util_cols['prev_close'] = dff['close'].shift(1)
util_cols['dreturn'] = dff[price_variable] / dff[price_variable].shift(1) - 1
util_cols['A'] = dff['high'] - util_cols['prev_close']
util_cols['B'] = util_cols['prev_close'] - dff['low']
util_cols['C'] = dff['high'] - dff['low']
dff['dreturn'] = dff[price_variable] / dff[price_variable].shift(1) - 1
for feat_name, lag in itertools.product(price_features, self.price_lags):
# Lagged indicators
if feat_name == 'dreturn':
dff[feat_name + str(lag)] = dff[price_variable] / dff[price_variable].shift(lag) - 1
if feat_name == 'zscore':
dff[feat_name + str(lag)] = (dff[price_variable] - dff[price_variable].rolling(lag, lag//2).mean()) \
/ dff[price_variable].rolling(lag, lag//2).std()
if feat_name == 'std':
dff[feat_name + str(lag)] = dff[price_variable].rolling(lag, lag//2).std()
if feat_name == 'pricevol':
dff[feat_name + str(lag)] = (dff['close']*dff['volume']).rolling(lag, lag//2).mean()
if feat_name == 'vol':
dff[feat_name + str(lag)] = dff['volume'].rolling(lag, lag//2).mean() / dff['volume'][0]
if feat_name == 'atr':
dff[feat_name] = util_cols[['A','B','C']].max(axis=1)
if feat_name == 'vwap':
dff[feat_name + str(lag)] = ((dff['close'] - dff['close'].shift(1)) * dff['volume']).rolling(lag, lag//2).sum()
if feat_name == 'demark':
demax = pd.Series([x if x>0 else 0 for x in (dff['high'] - dff['high'].shift(1))])
demin = pd.Series([x if x>0 else 0 for x in (dff['low'].shift(1) - dff['low'])])
dff['demark'] = (demax.rolling(lag, lag//2).mean() \
/ (demax.rolling(lag, lag//2).mean() + demin.rolling(lag, lag//2).mean())).values
# Immediate indicators
if feat_name == 'chaikin':
dff[feat_name] = dff['volume'] * ((dff['close'] - dff['low']) - (dff['high'] - dff['close'])) \
/ util_cols['C']
if index_dff is None:
continue
# Comparison-to-index indicators
if 'dreturn' not in index_dff.columns:
index_dff['dreturn'] = index_dff[price_variable] / index_dff[price_variable].shift(1) - 1
if feat_name == 'corr':
tmp = util_cols[['dreturn']].merge(index_dff[['dreturn']], left_index=True, right_index=True)
tmp['corr'] = tmp['dreturn_x'].rolling(lag, lag//2).corr(tmp['dreturn_y'])
dff = dff.merge(tmp[['corr']], how='left', left_index=True, right_index=True)
except Exception as e:
self.logger.debug(f'Exception while creating price-based feature \'{price_variable}\':\n' + str(e))
finally:
return dff
def read_file(self, f):
try:
            df = pd.read_csv(f, index_col=None)
import math
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from bcns import Durations, sim, Simulator, SimulatorCoordinated
from bcns.sim import Equ_LatD, Equ_pooled_LatD, Exp_LatD, Exp_pooled_LatD
def distance_between_2_points(a: tuple, b: tuple) -> float:
x1, y1 = a
x2, y2 = b
return round(math.sqrt((x2 - x1)**2 + (y2 - y1)**2), 6)
def prepare_test_centrality_lat_mat_baseline(nodes):
return Equ_LatD(3, 1, 0).tolist()
def prepare_test_centrality_lat_mat_1(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = np.array([0, 1, 2]) / 2
M2_lat = np.array([1, 0, 1]) / 2
M3_lat = np.array([2, 1, 0]) / 2
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
lat_mat[n]['M1'] = lat_mat['M1'][n]
lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare_test_centrality_lat_mat_2(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = np.array([0, 1, 4]) / 4
M2_lat = np.array([1, 0, 3]) / 4
M3_lat = np.array([4, 3, 0]) / 4
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
lat_mat[n]['M1'] = lat_mat['M1'][n]
lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare2_lat_mat_asymmetric(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = (np.array([1, 0, 1, 2, 3, 4, 5, 4]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
    for n in nodes:
        if n not in ('M1', 'M2'):
lat_mat[n]['M1'] = lat_mat['M1'][n]
lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i < j:
lat_mat[i][j] = lat_mat[i][j] * 100
return lat_mat
def prepare2_lat_mat(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
# 'OLM1', 'M1', 'OM1', 'OM', 'OM2', 'M2', 'ORM2', 'OEQ'
OLM1_lat = (np.array([0, 1, 2, 3, 4, 5, 6, 4.5825]) / 4)
M1_lat = (np.array([1, 0, 1, 2, 3, 4, 5, 4]) / 4)
ORM1_lat = (np.array([2, 1, 0, 1, 2, 3, 4, 4.583]) / 4)
OM_lat = (np.array([3, 2, 1, 0, 1, 2, 3, 4.472135955])/4)
OLM2_lat = (np.array([4, 3, 2, 1, 0, 1, 2, 4.583]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
ORM2_lat = (np.array([6, 5, 4, 3, 2, 1, 0, 4.5825])/4)
lm1 = (-1, 0)
m1 = (0, 0)
rm1 = (1, 0)
cm12 = (2, 0)
lm2 = (3, 0)
m2 = (4, 0)
rm2 = (5, 0)
m3 = (2, math.sqrt(12))
OEQ_lat = (np.array([distance_between_2_points(lm1, m3),
4,
distance_between_2_points(rm1, m3),
distance_between_2_points(cm12, m3),
distance_between_2_points(m3, lm2),
4,
distance_between_2_points(m3, rm2),
0]) / 4)
lat_mat = [OLM1_lat, M1_lat, ORM1_lat, OM_lat,
OLM2_lat, M2_lat, ORM2_lat, OEQ_lat]
lat_mat = list(map(lambda x: x.tolist(), lat_mat))
return lat_mat
def prepare1_coordinators_lat_mat_proportional(proportion):
C_lat = [0, proportion]
M1_lat = [proportion, 0]
lat_mat = [C_lat, M1_lat]
return lat_mat
def prepare1f_coordinators_lat_mat_proportional(proportion):
C_lat = [0, 0.5, 0.5 + proportion]
M1_lat = [0.5, 0, float('inf')]
M2_lat = [0.5 + proportion, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_proportional(proportion):
C_lat = [0, proportion * 1, (1-proportion) * 1]
M1_lat = [proportion * 1, 0, float('inf')]
M2_lat = [(1-proportion) * 1, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_proportional_M1_Farther(proportion, factor):
C_lat = [0, proportion * factor, (1-proportion) * 1]
M1_lat = [proportion * factor, 0, float('inf')]
M2_lat = [(1-proportion) * 1, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare3_coordinators_lat_mat_proportional(proportion):
m1 = (0,0)
m2 = (1,0)
m3 = (0.5, math.sqrt(0.75))
cp = (0.5, math.sqrt(0.75)-proportion)
C_lat = [0, distance_between_2_points(cp, m1), distance_between_2_points(cp, m2), distance_between_2_points(cp, m3)]
M1_lat = [distance_between_2_points(cp, m1), 0, float('inf'), float('inf')]
M2_lat = [distance_between_2_points(cp, m2), float('inf'), 0, float('inf')]
M3_lat = [distance_between_2_points(cp, m3), float('inf'), float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat, M3_lat]
return lat_mat
def prepare4_p2p_lat_mat_proportional(proportion):
m1 = (0,1)
m2 = (1,1)
m3 = (1,0)
m4 = (0,0)
M1_lat = [0, 1, 1.41421, 1]
M2_lat = [1, 0, 1, 1.41421]
M3_lat = [1.41421, 1, 0, 1]
M4_lat = [1, 1.41421, 1, 0]
lat_mat = [M1_lat, M2_lat, M3_lat, M4_lat]
return lat_mat
def prepare4_coordinators_lat_mat_proportional(proportion):
m1 = (0, 1)
m2 = (1, 1)
m3 = (1, 0)
m4 = (0, 0)
cp = (1-proportion, 1-proportion)
C_lat = [0,
distance_between_2_points(cp, m1),
distance_between_2_points(cp, m2),
distance_between_2_points(cp, m3),
distance_between_2_points(cp, m4)]
M1_lat = [distance_between_2_points(cp, m1), 0, float('inf'), float('inf'), float('inf')]
M2_lat = [distance_between_2_points(cp, m2), float('inf'), 0, float('inf'), float('inf')]
M3_lat = [distance_between_2_points(cp, m3), float('inf'), float('inf'), 0, float('inf')]
M4_lat = [distance_between_2_points(cp, m4), float('inf'), float('inf'), float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat, M3_lat, M4_lat]
return lat_mat
def prepare2_coordinators_lat_mat_middle():
C_lat = [0, .5, .5]
M1_lat = [.5, 0, float('inf')]
M2_lat = [.5, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_near_weaker():
C_lat = [0, 0.1, 0.9]
M1_lat = [0.1, 0, float('inf')]
M2_lat = [0.9, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_near_stronger():
C_lat = [0, 0.9, 0.1]
M1_lat = [0.9, 0, float('inf')]
M2_lat = [0.1, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_no_relay(nodes):
M1_lat = (np.array([0, 1, 2]))
C_lat = (np.array([1, 0, 1]) / 1000)
M2_lat = (np.array([2, 1, 0]))
lat_mat = [M1_lat, C_lat, M2_lat]
lat_mat = list(map(lambda x: x.tolist(), lat_mat))
return lat_mat
def prepare3_lat_mat_farther(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = np.array([1, 0, 1, 2, 3, 4, 5, 4*10]) / 4
M2_lat = np.array([5, 4, 3, 2, 1, 0, 1, 4*10]) / 4
M3_lat = np.array([11, 10, 9, 8, 9, 10, 11, 0])
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
lat_mat[n]['M1'] = lat_mat['M1'][n]
lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare3_lat_mat_fixed_asymetric(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = (np.array([1, 0, 1, 2, 3, 400, 5, 4]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
M3_lat = (
np.array([4.5825, 4, 4.583, 4.472135955, 4.583, 400, 4.5825, 0]) / 4)
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
lat_mat[n]['M1'] = lat_mat['M1'][n]
lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare3_lat_mat(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = (np.array([1, 0, 1, 2, 3, 4, 5, 4]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
##Coordinates:
lm1 = (-1, 0)
m1 = (0,0)
rm1 = (1,0)
cm12 = (2,0)
lm2 = (3,0)
m2 = (4,0)
rm2 = (5,0)
m3 = (2, math.sqrt(12))
M3_lat = (np.array([distance_between_2_points(lm1, m3),
4,
distance_between_2_points(rm1, m3),
distance_between_2_points(cm12, m3),
distance_between_2_points(m3, lm2),
4,
distance_between_2_points(m3, rm2),
0]) / 4)
#print(M3_lat)
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
lat_mat[n]['M1'] = lat_mat['M1'][n]
lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare5_lat_mat_fixed(nodes):
#self.NODES_IDS = ['WA-US', 'SI-CN', 'RE-IS', 'LI-CH', 'MO-RU']
'''# <location_1> <lat_1> <lng_1> <location_2> <lat_2> <lng_2> <dist. (in km)> <latency (in ms)>
WASHINGTON-DC-US 38.9047 -77.0164 SICHUAN-NA-CN 30.1333 102.9333 12338.40 197.41
WASHINGTON-DC-US 38.9047 -77.0164 REYKJAVÍK-NA-IS 64.1333 -21.9333 4512.89 72.21
WASHINGTON-DC-US 38.9047 -77.0164 LINTHAL-NA-CH 46.9167 9.0000 6703.91 107.26
WASHINGTON-DC-US 38.9047 -77.0164 MOSCOW-NA-RU 55.7500 37.6167 7820.54 125.13
SICHUAN-NA-CN 30.1333 102.9333 REYKJAVÍK-NA-IS 64.1333 -21.9333 8489.56 135.83
SICHUAN-NA-CN 30.1333 102.9333 LINTHAL-NA-CH 46.9167 9.0000 7891.06 126.26
SICHUAN-NA-CN 30.1333 102.9333 MOSCOW-NA-RU 55.7500 37.6167 5761.37 92.18
REYKJAVÍK-NA-IS 64.1333 -21.9333 LINTHAL-NA-CH 46.9167 9.0000 2680.24 42.88
REYKJAVÍK-NA-IS 64.1333 -21.9333 MOSCOW-NA-RU 55.7500 37.6167 3307.89 52.93
    LINTHAL-NA-CH 46.9167 9.0000 MOSCOW-NA-RU 55.7500 37.6167 2196.05 35.14
'''
# ['WA-US', 'SI-CN', 'RE-IS', 'LI-CH', 'MO-RU']
WA_lat = np.array([0, 197.41, 72.21, 107.26, 125.13])/ (1000*1.5)
SI_lat = np.array([-1, 0, 135.83, 126.26, 92.18])/ (1000*1.5)
RE_lat = np.array([-1, -1, 0, 42.88, 52.93])/ (1000*1.5)
LI_lat = np.array([-1, -1, -1, 0, 35.14])/ (1000*1.5)
MO_lat = np.array([-1, -1, -1, -1, 0])/ (1000*1.5)
lat_mat = [WA_lat, SI_lat, RE_lat, LI_lat, MO_lat]
for i in range(len(lat_mat)):
for j in range(len(lat_mat)):
if i > j:
lat_mat[i][j] = lat_mat[j][i]
return lat_mat
def prepare100_lat_mat_fixed_centrality(nodes):
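    # Builds a symmetric all-pairs latency matrix for the given city nodes from the
    # pairwise measurements in evaluation/100_cities.txt. The division by (1000*1.5)
    # converts milliseconds to seconds and applies the same extra 1.5 scaling factor
    # used consistently by the other latency helpers in this module (assumed to be a
    # simulator-specific normalisation; the rationale is not stated in this excerpt).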
latencies = pd.read_csv('evaluation/100_cities.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]] / (1000*1.5))
return lat_mat
def prepare240_lat_mat_fixed_capital_centrality(nodes):
latencies = pd.read_csv(
'evaluation/cities_capitals_lat_lng_latency.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]]/(1000*1.5))
return lat_mat
def prepare15_lat_mat_ls_fixed_capital_centrality(nodes):
latencies = pd.read_csv(
'evaluation/cities_capitals_lat_lng_latency.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]] / (1000*3.2))
return lat_mat
def prepare240_lat_mat_cs_fixed_capital_centrality(nodes):
latencies = pd.read_csv(
'evaluation/cities_capitals_lat_lng_latency.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]] / (1000*3.2*1.5))
return lat_mat
def prepare15_lat_mat_fixed(nodes):
# nodes= ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
# 'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
# 'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
# 'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
latencies = pd.read_csv('evaluation/adjlst-2.txt', delim_whitespace=True)
nodes = ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
                f1 = latencies[(latencies['location_1'] == nodes[i])
                               & (latencies['location_2'] == nodes[j])]
                if len(f1) == 0:
                    f2 = latencies[(latencies['location_2'] == nodes[i])
                                   & (latencies['location_1'] == nodes[j])]
                    result = f2['latency_ms'].iloc[0]
                else:
                    result = f1['latency_ms'].iloc[0]
lat_mat[i][j] = (result/(1000*1.5))
return lat_mat
def prepare15_ls_lat_mat_fixed(nodes):
# nodes= ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
# 'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
# 'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
# 'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
latencies = pd.read_csv('evaluation/adjlst-2.txt', delim_whitespace=True)
nodes = ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
                f1 = latencies[(latencies['location_1'] == nodes[i])
                               & (latencies['location_2'] == nodes[j])]
                if len(f1) == 0:
                    f2 = latencies[(latencies['location_2'] == nodes[i])
                                   & (latencies['location_1'] == nodes[j])]
                    result = f2['latency_ms'].iloc[0]
                else:
                    result = f1['latency_ms'].iloc[0]
lat_mat[i][j] = (result/(1000*3.2))
return lat_mat
def prepare15_cs_lat_mat_fixed(nodes):
# nodes= ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
# 'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
# 'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
# 'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
latencies = pd.read_csv('evaluation/adjlst-2.txt', delim_whitespace=True)
nodes = ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
                f1 = latencies[(latencies['location_1'] == nodes[i])
                               & (latencies['location_2'] == nodes[j])]
                if len(f1) == 0:
                    f2 = latencies[(latencies['location_2'] == nodes[i])
                                   & (latencies['location_1'] == nodes[j])]
                    result = f2['latency_ms'].iloc[0]
                else:
                    result = f1['latency_ms'].iloc[0]
lat_mat[i][j] = (result/(1000*3.2*1.5))
return lat_mat
def to_dataframe_prepare_test_centrality_lat_mat_baseline(experiments_stats, nodes_ids):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[0]}, {x[1]}, {x[2]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[0]}, {x[1]}, {x[2]}]")
miner_df.id = miner_df.id.map(dict(zip(range(0, 3), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe2(experiments_stats, nodes_ids, nodes_count=2):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[1]}, {x[5]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[1]}, {x[5]}]")
miner_df.id = miner_df.id.map(dict(zip(range(0, 8), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe1_coordinators(experiments_stats, nodes_ids, nodes_count=2):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[0]}, {x[1]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[0]}, {x[1]}]")
miner_df.id = miner_df.id.map(
dict(zip(range(0, len(nodes_ids)), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe2_coordinators(experiments_stats, nodes_ids, nodes_count=3):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
    miner_df = pd.concat(miner_df)
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.recorders.joinquant.common import JoinquantTimestampsDataRecorder, call_joinquant_api, get_from_path_fields, \
get_fc
from zvt.api import to_report_period_type
from zvt.contract.api import df_to_db, get_data
from zvt.domain import StockDetail, FinancePerShare, BalanceSheet
from zvt.recorders.emquantapi.common import mainCallback, to_em_entity_id
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, TIME_FORMAT_DAY, now_pd_timestamp, to_pd_timestamp
try:
from EmQuantAPI import c
except:
pass
class EmBaseChinaStockFinanceIndexRecorder(JoinquantTimestampsDataRecorder):
finance_report_type = None
data_type = 1
timestamps_fetching_url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetCompanyReportDateList'
timestamp_list_path_fields = ['CompanyReportDateList']
timestamp_path_fields = ['ReportDate']
entity_provider = 'emquantapi'
entity_schema = StockDetail
provider = 'emquantapi'
def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, real_time=False,
fix_duplicate_way='add', start_timestamp=None, end_timestamp=None, close_hour=0,
close_minute=0) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
# 调用登录函数(激活后使用,不需要用户名密码)
loginResult = c.start("ForceLogin=1", '', mainCallback)
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
def on_finish(self):
# 退出
loginresult = c.stop()
if (loginresult.ErrorCode != 0):
print("login in fail")
exit()
def init_timestamps(self, entity):
param = {
"color": "w",
"fc": get_fc(entity),
"DataType": 1
}
if self.finance_report_type == 'INCOME_STATEMENT' or self.finance_report_type == 'CASHFLOW_STATEMENT':
param['ReportType'] = 1
timestamp_json_list = call_joinquant_api(url=self.timestamps_fetching_url,
path_fields=self.timestamp_list_path_fields,
param=param)
if self.timestamp_path_fields:
timestamps = [get_from_path_fields(data, self.timestamp_path_fields) for data in timestamp_json_list]
return [to_pd_timestamp(t) for t in timestamps]
def generate_request_param(self, security_item, start, end, size, timestamps):
return [to_time_str(i) for i in (timestamps)]
def record(self, entity, start, end, size, timestamps):
param = self.generate_request_param(entity, start, end, size, timestamps)
# to_time_str(
# self.data_schema.query_data(filters=[self.data_schema.report_date>='20200101'],
# entity_id=entity.id, columns=['report_date']).report_date.max()) >= to_time_str(
# '20200101')
columns_map = {key: value[0] for key, value in self.get_data_map().items()}
columns_list = list(columns_map.values())
em_code = to_em_entity_id(entity)
df = pd.DataFrame()
for reportdate in param:
em_data = c.css(em_code, columns_list, "ispandas=1,TtmType=1,TradeDate=" + to_time_str(
reportdate) + ",ReportDate=" + to_time_str(reportdate))
if type(em_data) == pd.DataFrame:
em_data['report_date'] = to_time_str(reportdate)
df = df.append(em_data)
if df.empty:
return None
df.rename(columns={value: key for key, value in columns_map.items()}, inplace=True)
df = df.sort_values("report_date", ascending=True)
if pd_is_not_null(df):
df.rename(columns={value: key for key, value in columns_map.items()}, inplace=True)
df['entity_id'] = entity.id
df['provider'] = 'emquantapi'
df['code'] = entity.code
df['report_period'] = df['report_date'].apply(lambda x: to_report_period_type(x))
def generate_id(se):
return "{}_{}".format(se['entity_id'], to_time_str(se['report_date'], fmt=TIME_FORMAT_DAY))
df['id'] = df[['entity_id', 'report_date']].apply(generate_id, axis=1)
data_pub_date = BalanceSheet.query_data(entity_id=entity.id, columns=['pub_date', 'id'])
del data_pub_date['timestamp']
df = pd.merge(df, data_pub_date, on=['id'])
df['timestamp'] = df['pub_date']
df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=True)
return None
def record2(self, entity, start, end, size, timestamps):
param = self.generate_request_param(entity, start, end, size, timestamps)
if not end:
end = to_time_str(now_pd_timestamp())
start = to_time_str(start)
em_code = to_em_entity_id(entity)
        df = pd.DataFrame()
import re
import warnings
import pandas as pd
import numpy as np
from string import Formatter
from psycopg2 import sql
from airflow.hooks.postgres_hook import PostgresHook
from drawbridge.connections.airflow import AirflowConnectionHandler
from drawbridge.util._decorators import cache_dataset
from drawbridge import queries
INITIAL_DATA_DATE = '2017-12-09'
DATASET_TO_VERSION_TABLE_TRANSLATION = {
'properties': 'property_versions',
'users': 'user_versions',
'policies': 'policy_versions',
'insureds': 'insured_versions',
'people': 'people_versions'
}
DATASET_TO_ID_TRANSLATION = {
'properties': 'property_id',
'policies': 'policy_id',
'people': 'person_id',
'users': 'user_id',
'insureds': 'insured_id',
'addresses': 'address_id',
'households': 'household_id'
}
class Inquisition(object):
def __init__(self,
conn_id,
conn_cfg_path=None,
cache_path=None,
cache=False,
show_warnings=False):
if not AirflowConnectionHandler().has_connection(conn_id) and conn_cfg_path:
AirflowConnectionHandler().add_connection_from_file(conn_id, conn_cfg_path)
self.conn = PostgresHook(postgres_conn_id=conn_id).get_conn()
self.cache = cache
self.cache_path = cache_path
self.show_warnings = show_warnings
@cache_dataset
def query(self, inquiry, params=dict(), cache_name=None, cache=None):
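        # Accepts either a raw query name/string (wrapped into an Inquiry) or an
        # Inquiry object, fills it with `params` against the open connection,
        # renders the final SQL, and returns the result as a pandas DataFrame.
        # The @cache_dataset decorator presumably short-circuits to/from
        # `cache_path` when caching is enabled.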
if cache is None:
cache = self.cache
if isinstance(inquiry, str):
inquiry = Inquiry(query=inquiry, show_warnings=self.show_warnings)
inquiry.fill_inquiry(self.conn, params)
statement = inquiry.format_inquiry()
return pd.read_sql_query(sql=statement, con=self.conn)
@cache_dataset
def get_table_snapshot(self, table_name, params=dict(), cache_name=None, cache=None):
if cache is None:
cache = self.cache
inquiry = Inquiry(
query='table_history',
params=dict(
table_name=DATASET_TO_VERSION_TABLE_TRANSLATION[table_name],
date_max=params['date_max']
),
subqueries=dict(
selected_items=Inquiry(
query=table_name + '__selected',
params=params,
show_warnings=self.show_warnings
)
)
)
change_log = self.query(inquiry=inquiry)
table_objects = (
            change_log.loc[pd.notnull(change_log['object_changes'])]
import copy
import json
import jsonschema
import logging
import pandas as pd
import os
from sklearn.cross_validation import train_test_split
import minst.utils as utils
logger = logging.getLogger(__name__)
class MissingDataException(Exception):
pass
class Observation(object):
"""Document model each item in the collection."""
# This should use package resources :o(
SCHEMA_PATH = os.path.join(os.path.dirname(__file__), 'schema',
'observation.json')
SCHEMA = json.load(open(SCHEMA_PATH))
def __init__(self, index, dataset, audio_file, instrument, source_index,
start_time, duration, note_number=None, dynamic='',
partition=''):
"""Model definition for an instrument observation.
Parameters
----------
index :
dataset :
audio_file : str
Relative file path to an audiofile.
instrument :
source_index :
start_time :
duration :
note_number :
dynamic :
partition :
Returns
-------
obs : Observation
Populated observation
"""
self.index = index
self.dataset = dataset
self.audio_file = audio_file
self.instrument = instrument
self.source_index = source_index
self.start_time = start_time
self.duration = duration
self.note_number = note_number
self.dynamic = dynamic
self.partition = partition
def to_builtin(self):
return self.__dict__.copy()
@classmethod
def from_series(cls, series):
"""Convert a pd.Series to an Observation."""
return cls(index=series.name, **series.to_dict())
def to_series(self):
"""Convert to a flat series (ie make features a column)
Returns
-------
pandas.Series
"""
flat_dict = self.to_dict()
name = flat_dict.pop("index")
return pd.Series(data=flat_dict, name=name)
def to_dict(self):
return self.__dict__.copy()
def __getitem__(self, key):
return self.__dict__[key]
def validate(self, schema=None, verbose=False, check_files=True):
"""Returns True if valid.
"""
schema = self.SCHEMA if schema is None else schema
success = True
try:
jsonschema.validate(self.to_builtin(), schema)
except jsonschema.ValidationError as derp:
success = False
if verbose:
print("Failed schema test: \n{}".format(derp))
if success and check_files:
success &= utils.check_audio_file(self.audio_file)[0]
if not success and verbose:
print("Failed file check: \n{}".format(self.audio_file))
return success
def _enforce_obs(obs, audio_root='', strict=True):
"""Get dict from an Observation if an observation, else just dict"""
audio_file = obs['audio_file']
escaped_audio_file = os.path.join(audio_root, audio_file)
file_checks = [os.path.exists(audio_file),
os.path.exists(escaped_audio_file)]
if not any(file_checks) and strict:
raise MissingDataException(
"Audio file(s) missing:\n\tbase: {}\n\tescaped:{}"
"".format(audio_file, escaped_audio_file))
if isinstance(obs, Observation):
obs = obs.to_dict()
obs['audio_file'] = escaped_audio_file if file_checks[1] else audio_file
return obs
class Collection(object):
"""Dictionary-like collection of Observations (maintains order).
Expands relative audio files to a given `audio_root` path.
"""
# MODEL = Observation
def __init__(self, observations, audio_root='', strict=False):
"""
Parameters
----------
observations : list
List of Observations (as dicts or Observations.)
If they're dicts, this will convert them to Observations.
data_root : str or None
Path to look for an observation, if not None
"""
self._observations = [Observation(**_enforce_obs(x, audio_root,
strict))
for x in observations]
self.audio_root = audio_root
self.strict = strict
def __eq__(self, a):
is_eq = False
if hasattr(a, 'to_builtin'):
is_eq = self.to_builtin() == a.to_builtin()
return is_eq
def __len__(self):
return len(self.values())
def __getitem__(self, n):
"""Return the observation for a given integer index."""
return self._observations[n]
def items(self):
return [(v.index, v) for v in self.values()]
def values(self):
return self._observations
def keys(self):
return [v.index for v in self.values()]
def append(self, observation, audio_root=None):
audio_root = self.audio_root if audio_root is None else audio_root
obs = _enforce_obs(observation, audio_root, self.strict)
self._observations += [Observation(**obs)]
def to_builtin(self):
return [v.to_builtin() for v in self.values()]
@classmethod
def read_json(cls, json_path, audio_root=''):
with open(json_path, 'r') as fh:
return cls(json.load(fh), audio_root=audio_root)
def to_json(self, json_path=None, **kwargs):
"""Pandas-like `to_json` method.
Parameters
----------
json_path : str, or None
If given, will attempt to write JSON to disk; else returns a string
of serialized data.
**kwargs : keyword args
Pass-through parameters to the JSON serializer.
"""
sdata = json.dumps(self.to_builtin(), **kwargs)
if json_path is not None:
with open(json_path, 'w') as fh:
fh.write(sdata)
else:
return sdata
def validate(self, verbose=False, check_files=True):
"""Returns True if all are valid."""
return all([x.validate(verbose=verbose, check_files=check_files)
for x in self.values()])
def to_dataframe(self):
return pd.DataFrame([x.to_series() for x in self.values()])
@classmethod
def from_dataframe(cls, dframe, audio_root=''):
return cls([Observation.from_series(x) for _, x in dframe.iterrows()],
audio_root=audio_root)
def copy(self, deep=True):
return Collection(copy.deepcopy(self._observations))
def view(self, column, filter_value):
"""Returns a copy of the collection restricted to the filter value.
Parameters
----------
column : str
Name of the column for filtering.
filter_value : obj
Value to restrict the collection.
Returns
-------
"""
thecopy = copy.copy(self.to_dataframe())
ds_view = thecopy[thecopy[column] == filter_value]
return Collection.from_dataframe(ds_view, self.audio_root)
def load(filename, audio_root):
"""
"""
return Collection.load(filename)
def partition_collection(collection, test_set, train_val_split=0.2,
max_files_per_class=None):
"""Returns Datasets for train and validation constructed
from the datasets not in the test_set, and split with
the ratio train_val_split.
* First selects from only the datasets given in datasets.
* Then **for each instrument** (so the distribution from
each instrument doesn't change)
* train_test_split to generate training and validation sets.
* if max_files_per_class, also then restrict the training set to
a maximum of that number of files for each train and test
Parameters
----------
test_set : str
String in ["rwc", "uiowa", "philharmonia"] which selects
the hold-out-set to be used for testing.
Returns
-------
partition_df : pd.DataFrame
DataFrame with only an index to the original table, and
the partiition in ['train', 'valid', 'test']
"""
df = collection.to_dataframe()
test_df = collection.view(
column='dataset', filter_value=test_set).to_dataframe()
datasets = set(df["dataset"].unique()) - set([test_set])
search_df = df[df["dataset"].isin(datasets)]
selected_instruments_train = []
selected_instruments_valid = []
for instrument in search_df["instrument"].unique():
instrument_df = search_df[search_df["instrument"] == instrument]
if len(instrument_df) < 2:
logger.warning("Instrument {} doesn't haven enough samples "
"to split.".format(instrument))
continue
groups = instrument_df.groupby(['source_index'])
train_grps, valid_grps = train_test_split(
list(groups), test_size=train_val_split)
# Groups get backed out as (source_index, dataframe) tuples, so stick
# these back together now that they've been partitioned.
traindf = pd.concat(x[1] for x in train_grps)
        validdf = pd.concat(x[1] for x in valid_grps)
"""
Test AR Model
"""
import datetime as dt
from itertools import product
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pandas as pd
from pandas import Index, Series, date_range, period_range
from pandas.testing import assert_series_equal
import pytest
from statsmodels.datasets import macrodata, sunspots
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.sm_exceptions import SpecificationWarning, ValueWarning
from statsmodels.tools.tools import Bunch
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.deterministic import (
DeterministicProcess,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.tests.results import results_ar
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
def gen_ar_data(nobs):
rs = np.random.RandomState(982739)
idx = pd.date_range(dt.datetime(1900, 1, 1), freq="M", periods=nobs)
return pd.Series(rs.standard_normal(nobs), index=idx), rs
def gen_ols_regressors(ar, seasonal, trend, exog):
nobs = 500
y, rs = gen_ar_data(nobs)
maxlag = ar if isinstance(ar, int) else max(ar)
reg = []
if "c" in trend:
const = pd.Series(np.ones(nobs), index=y.index, name="const")
reg.append(const)
if "t" in trend:
time = np.arange(1, nobs + 1)
time = pd.Series(time, index=y.index, name="time")
reg.append(time)
if isinstance(ar, int) and ar:
lags = np.arange(1, ar + 1)
elif ar == 0:
lags = None
else:
lags = ar
if seasonal:
seasons = np.zeros((500, 12))
for i in range(12):
seasons[i::12, i] = 1
cols = ["s.{0}".format(i) for i in range(12)]
seasons = pd.DataFrame(seasons, columns=cols, index=y.index)
if "c" in trend:
seasons = seasons.iloc[:, 1:]
reg.append(seasons)
if maxlag:
for lag in lags:
reg.append(y.shift(lag))
if exog:
x = rs.standard_normal((nobs, exog))
cols = ["x.{0}".format(i) for i in range(exog)]
x = pd.DataFrame(x, columns=cols, index=y.index)
reg.append(x)
else:
x = None
reg.insert(0, y)
df = pd.concat(reg, axis=1).dropna()
endog = df.iloc[:, 0]
exog = df.iloc[:, 1:]
return y, x, endog, exog
ar = [0, 3, [1, 3], [3]]
seasonal = [True, False]
trend = ["n", "c", "t", "ct"]
exog = [None, 2]
covs = ["nonrobust", "HC0"]
params = list(product(ar, seasonal, trend, exog, covs))
final = []
for param in params:
if param[0] != 0 or param[1] or param[2] != "n" or param[3]:
final.append(param)
params = final
names = ("AR", "Seasonal", "Trend", "Exog", "Cov Type")
ids = [
", ".join([n + ": " + str(p) for n, p in zip(names, param)])
for param in params
]
@pytest.fixture(scope="module", params=params, ids=ids)
def ols_autoreg_result(request):
ar, seasonal, trend, exog, cov_type = request.param
y, x, endog, exog = gen_ols_regressors(ar, seasonal, trend, exog)
ar_mod = AutoReg(y, ar, seasonal=seasonal, trend=trend, exog=x)
ar_res = ar_mod.fit(cov_type=cov_type)
ols = OLS(endog, exog)
ols_res = ols.fit(cov_type=cov_type, use_t=False)
return ar_res, ols_res
attributes = [
"bse",
"cov_params",
"df_model",
"df_resid",
"fittedvalues",
"llf",
"nobs",
"params",
"resid",
"scale",
"tvalues",
"use_t",
]
def fix_ols_attribute(val, attrib, res):
"""
fixes to correct for df adjustment b/t OLS and AutoReg with nonrobust cov
"""
nparam = res.k_constant + res.df_model
nobs = nparam + res.df_resid
df_correction = (nobs - nparam) / nobs
if attrib in ("scale",):
return val * df_correction
elif attrib == "df_model":
return val + res.k_constant
elif res.cov_type != "nonrobust":
return val
elif attrib in ("bse", "conf_int"):
return val * np.sqrt(df_correction)
elif attrib in ("cov_params", "scale"):
return val * df_correction
elif attrib in ("f_test",):
return val / df_correction
elif attrib in ("tvalues",):
return val / np.sqrt(df_correction)
return val
@pytest.mark.parametrize("attribute", attributes)
def test_equiv_ols_autoreg(ols_autoreg_result, attribute):
a, o = ols_autoreg_result
ols_a = getattr(o, attribute)
ar_a = getattr(a, attribute)
if callable(ols_a):
ols_a = ols_a()
ar_a = ar_a()
ols_a = fix_ols_attribute(ols_a, attribute, o)
assert_allclose(ols_a, ar_a)
def test_conf_int_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
a_ci = a.conf_int()
o_ci = o.conf_int()
if o.cov_type == "nonrobust":
spread = o_ci.T - o.params
spread = fix_ols_attribute(spread, "conf_int", o)
o_ci = (spread + o.params).T
assert_allclose(a_ci, o_ci)
def test_f_test_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
r = np.eye(a.params.shape[0])
a_f = a.f_test(r).fvalue
o_f = o.f_test(r).fvalue
o_f = fix_ols_attribute(o_f, "f_test", o)
assert_allclose(a_f, o_f)
@pytest.mark.smoke
def test_other_tests_autoreg(ols_autoreg_result):
a, _ = ols_autoreg_result
r = np.ones_like(a.params)
a.t_test(r)
r = np.eye(a.params.shape[0])
a.wald_test(r)
# TODO: test likelihood for ARX model?
class CheckARMixin(object):
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
def test_bse(self):
bse = np.sqrt(np.diag(self.res1.cov_params()))
# no dof correction for compatability with Stata
assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
def test_pickle(self):
from io import BytesIO
fh = BytesIO()
# test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0, 0)
res_unpickled = self.res1.__class__.load(fh)
assert type(res_unpickled) is type(self.res1) # noqa: E721
@pytest.mark.smoke
def test_summary(self):
assert isinstance(self.res1.summary().as_text(), str)
@pytest.mark.smoke
def test_pvalues(self):
assert isinstance(self.res1.pvalues, (np.ndarray, pd.Series))
params = product(
[0, 1, 3, [1, 3]],
["n", "c", "t", "ct"],
[True, False],
[0, 2],
[None, 11],
["none", "drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
def gen_data(nobs, nexog, pandas, seed=92874765):
rs = np.random.RandomState(seed)
endog = rs.standard_normal((nobs))
exog = rs.standard_normal((nobs, nexog)) if nexog else None
if pandas:
index = pd.date_range(
dt.datetime(1999, 12, 31), periods=nobs, freq="M"
)
endog = pd.Series(endog, name="endog", index=index)
if nexog:
cols = ["exog.{0}".format(i) for i in range(exog.shape[1])]
exog = pd.DataFrame(exog, columns=cols, index=index)
from collections import namedtuple
DataSet = namedtuple("DataSet", ["endog", "exog"])
return DataSet(endog=endog, exog=exog)
@pytest.fixture(scope="module", params=params, ids=ids)
def ar_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.fixture(scope="module")
def ar2(request):
gen = np.random.RandomState(20210623)
e = gen.standard_normal(52)
y = 10 * np.ones_like(e)
for i in range(2, y.shape[0]):
y[i] = 1 + 0.5 * y[i - 1] + 0.4 * y[i - 2] + e[i]
index = pd.period_range("2000-01-01", periods=e.shape[0] - 2, freq="M")
return pd.Series(y[2:], index=index)
params = product(
[0, 3, [1, 3]],
["c"],
[True, False],
[0],
[None, 11],
["drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
# Only test 1/3 to save time
@pytest.fixture(scope="module", params=params[::3], ids=ids[::3])
def plot_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.mark.matplotlib
@pytest.mark.smoke
def test_autoreg_smoke_plots(plot_data, close_figures):
from matplotlib.figure import Figure
mod = AutoReg(
plot_data.endog,
plot_data.lags,
trend=plot_data.trend,
seasonal=plot_data.seasonal,
exog=plot_data.exog,
hold_back=plot_data.hold_back,
period=plot_data.period,
missing=plot_data.missing,
)
res = mod.fit()
fig = res.plot_diagnostics()
assert isinstance(fig, Figure)
if plot_data.exog is None:
fig = res.plot_predict(end=300)
assert isinstance(fig, Figure)
fig = res.plot_predict(end=300, alpha=None, in_sample=False)
assert isinstance(fig, Figure)
assert isinstance(res.summary(), Summary)
@pytest.mark.smoke
def test_autoreg_predict_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
exog_oos = None
if ar_data.exog is not None:
exog_oos = np.empty((1, ar_data.exog.shape[1]))
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
if ar_data.lags == 0 and ar_data.exog is None:
mod.predict(res.params, 0, 350, exog_oos=exog_oos)
if isinstance(ar_data.endog, pd.Series) and (
not ar_data.seasonal or ar_data.period is not None
):
ar_data.endog.index = list(range(ar_data.endog.shape[0]))
if ar_data.exog is not None:
ar_data.exog.index = list(range(ar_data.endog.shape[0]))
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
period=ar_data.period,
missing=ar_data.missing,
)
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
@pytest.mark.matplotlib
def test_parameterless_autoreg():
data = gen_data(250, 0, False)
mod = AutoReg(data.endog, 0, trend="n", seasonal=False, exog=None)
res = mod.fit()
for attr in dir(res):
if attr.startswith("_"):
continue
# TODO
if attr in (
"predict",
"f_test",
"t_test",
"initialize",
"load",
"remove_data",
"save",
"t_test",
"t_test_pairwise",
"wald_test",
"wald_test_terms",
):
continue
attr = getattr(res, attr)
if callable(attr):
attr()
else:
assert isinstance(attr, object)
def test_predict_errors():
data = gen_data(250, 2, True)
mod = AutoReg(data.endog, 3)
res = mod.fit()
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog=data.exog)
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog_oos=data.exog)
with pytest.raises(ValueError, match="hold_back must be >= lags"):
AutoReg(data.endog, 3, hold_back=1)
with pytest.raises(ValueError, match="freq cannot be inferred"):
AutoReg(data.endog.values, 3, seasonal=True)
mod = AutoReg(data.endog, 3, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match=r"The shape of exog \(200, 2\)"):
mod.predict(res.params, exog=data.exog.iloc[:200])
with pytest.raises(ValueError, match="The number of columns in exog_oos"):
mod.predict(res.params, exog_oos=data.exog.iloc[:, :1])
with pytest.raises(ValueError, match="Prediction must have `end` after"):
mod.predict(res.params, start=200, end=199)
with pytest.raises(ValueError, match="exog_oos must be provided"):
mod.predict(res.params, end=250, exog_oos=None)
mod = AutoReg(data.endog, 0, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match="start and end indicate that 10"):
mod.predict(res.params, end=259, exog_oos=data.exog.iloc[:5])
def test_spec_errors():
data = gen_data(250, 2, True)
with pytest.raises(ValueError, match="lags must be a non-negative scalar"):
AutoReg(data.endog, -1)
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, 1, 1])
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, -2, 3])
@pytest.mark.smoke
def test_dynamic_forecast_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
res.predict(dynamic=True)
if ar_data.exog is None:
res.predict(end=260, dynamic=True)
@pytest.mark.smoke
def test_ar_select_order_smoke():
data = sunspots.load().data["SUNACTIVITY"]
ar_select_order(data, 4, glob=True, trend="n")
ar_select_order(data, 4, glob=False, trend="n")
ar_select_order(data, 4, seasonal=True, period=12)
ar_select_order(data, 4, seasonal=False)
ar_select_order(data, 4, glob=True)
ar_select_order(data, 4, glob=True, seasonal=True, period=12)
class CheckAutoRegMixin(CheckARMixin):
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse_stata, DECIMAL_6)
class TestAutoRegOLSConstant(CheckAutoRegMixin):
"""
Test AutoReg fit by OLS with a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
data.endog.index = list(range(len(data.endog)))
cls.res1 = AutoReg(data.endog, lags=9).fit()
cls.res2 = results_ar.ARResultsOLS(constant=True)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
class TestAutoRegOLSNoConstant(CheckAutoRegMixin):
"""f
Test AR fit by OLS without a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
cls.res1 = AutoReg(np.asarray(data.endog), lags=9, trend="n").fit()
cls.res2 = results_ar.ARResultsOLS(constant=False)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
@pytest.mark.parametrize("lag", list(np.arange(1, 16 + 1)))
def test_autoreg_info_criterion(lag):
data = sunspots.load()
endog = np.asarray(data.endog)
endog_tmp = endog[16 - lag :]
r = AutoReg(endog_tmp, lags=lag).fit()
# See issue #324 for the corrections vs. R
aic = r.aic
hqic = r.hqic
bic = r.bic
res1 = np.array([aic, hqic, bic, r.fpe])
# aic correction to match R
res2 = results_ar.ARLagResults("const").ic.T
comp = res2[lag - 1, :].copy()
k = 2 + lag
pen = np.array([2, 2 * np.log(np.log(r.nobs)), np.log(r.nobs)])
comp[:3] = -2 * r.llf + pen * k
assert_almost_equal(res1, comp, DECIMAL_6)
r2 = AutoReg(endog, lags=lag, hold_back=16).fit()
assert_allclose(r.aic, r2.aic)
assert_allclose(r.bic, r2.bic)
assert_allclose(r.hqic, r2.hqic)
assert_allclose(r.fpe, r2.fpe)
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_named_series(reset_randomstate, old_names):
warning = FutureWarning if old_names else None
dates = period_range(start="2011-1", periods=72, freq="M")
y = Series(np.random.randn(72), name="foobar", index=dates)
with pytest.warns(warning):
results = AutoReg(y, lags=2, old_names=old_names).fit()
if old_names:
idx = Index(["intercept", "foobar.L1", "foobar.L2"])
else:
idx = Index(["const", "foobar.L1", "foobar.L2"])
assert results.params.index.equals(idx)
@pytest.mark.smoke
def test_autoreg_series():
# GH#773
dta = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(dta), freq="Q")
dta.index = dates
ar = AutoReg(dta, lags=15).fit()
ar.bse
def test_ar_order_select():
# GH#2118
np.random.seed(12345)
y = arma_generate_sample([1, -0.75, 0.3], [1], 100)
ts = Series(
y,
index=date_range(start=dt.datetime(1990, 1, 1), periods=100, freq="M"),
)
res = ar_select_order(ts, maxlag=12, ic="aic")
assert tuple(res.ar_lags) == (1, 2)
assert isinstance(res.aic, dict)
assert isinstance(res.bic, dict)
assert isinstance(res.hqic, dict)
assert isinstance(res.model, AutoReg)
assert not res.seasonal
assert res.trend == "c"
assert res.period is None
def test_autoreg_constant_column_trend():
sample = np.array(
[
0.46341460943222046,
0.46341460943222046,
0.39024388790130615,
0.4146341383457184,
0.4146341383457184,
0.4146341383457184,
0.3414634168148041,
0.4390243887901306,
0.46341460943222046,
0.4390243887901306,
]
)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7, trend="n")
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_summary_corner(old_names):
data = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(data), freq="Q")
data.index = dates
warning = FutureWarning if old_names else None
with pytest.warns(warning):
res = AutoReg(data, lags=4, old_names=old_names).fit()
summ = res.summary().as_text()
assert "AutoReg(4)" in summ
assert "cpi.L4" in summ
assert "03-31-1960" in summ
with pytest.warns(warning):
res = AutoReg(data, lags=0, old_names=old_names).fit()
summ = res.summary().as_text()
if old_names:
assert "intercept" in summ
else:
assert "const" in summ
assert "AutoReg(0)" in summ
@pytest.mark.smoke
def test_autoreg_score():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), 3)
res = ar.fit()
score = ar.score(res.params)
assert isinstance(score, np.ndarray)
assert score.shape == (4,)
assert ar.information(res.params).shape == (4, 4)
assert_allclose(-ar.hessian(res.params), ar.information(res.params))
def test_autoreg_roots():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), lags=1)
res = ar.fit()
assert_almost_equal(res.roots, np.array([1.0 / res.params[-1]]))
def test_equiv_dynamic(reset_randomstate):
e = np.random.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
mod = AutoReg(y, 1)
res = mod.fit()
pred0 = res.predict(500, 800, dynamic=0)
pred1 = res.predict(500, 800, dynamic=True)
idx = pd.date_range(dt.datetime(2000, 1, 30), periods=1001, freq="M")
y = pd.Series(y, index=idx)
mod = AutoReg(y, 1)
res = mod.fit()
pred2 = res.predict(idx[500], idx[800], dynamic=idx[500])
pred3 = res.predict(idx[500], idx[800], dynamic=0)
pred4 = res.predict(idx[500], idx[800], dynamic=True)
assert_allclose(pred0, pred1)
assert_allclose(pred0, pred2)
assert_allclose(pred0, pred3)
assert_allclose(pred0, pred4)
def test_dynamic_against_sarimax():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
smod = SARIMAX(y, order=(1, 0, 0), trend="c")
sres = smod.fit(disp=False)
mod = AutoReg(y, 1)
spred = sres.predict(900, 1100)
pred = mod.predict(sres.params[:2], 900, 1100)
assert_allclose(spred, pred)
spred = sres.predict(900, 1100, dynamic=True)
pred = mod.predict(sres.params[:2], 900, 1100, dynamic=True)
assert_allclose(spred, pred)
spred = sres.predict(900, 1100, dynamic=50)
pred = mod.predict(sres.params[:2], 900, 1100, dynamic=50)
assert_allclose(spred, pred)
def test_predict_seasonal():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
effects = 10 * np.cos(np.arange(12) / 11 * 2 * np.pi)
for i in range(1, 1001):
y[i] = 10 + 0.9 * y[i - 1] + e[i] + effects[i % 12]
ys = pd.Series(
y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq="M")
)
mod = AutoReg(ys, 1, seasonal=True)
res = mod.fit()
c = res.params.iloc[0]
seasons = np.zeros(12)
seasons[1:] = res.params.iloc[1:-1]
ar = res.params.iloc[-1]
pred = res.predict(900, 1100, True)
direct = np.zeros(201)
direct[0] = y[899] * ar + c + seasons[900 % 12]
for i in range(1, 201):
direct[i] = direct[i - 1] * ar + c + seasons[(900 + i) % 12]
direct = pd.Series(
direct, index=pd.date_range(ys.index[900], periods=201, freq="M")
)
assert_series_equal(pred, direct)
pred = res.predict(900, dynamic=False)
direct = y[899:-1] * ar + c + seasons[np.arange(900, 1001) % 12]
direct = pd.Series(
direct, index=pd.date_range(ys.index[900], periods=101, freq="M")
)
| assert_series_equal(pred, direct) | pandas.testing.assert_series_equal |
import os.path
import newspaper as N
import numpy as np
import pandas as pd
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from scipy.stats import norm, zscore
SITES = {
'CNN': 'http://www.cnn.com/',
'Fox': 'http://www.foxnews.com/',
'NPR': 'https://www.npr.org/sections/news/',
'BBC': 'http://www.bbc.com/news/world/us_and_canada',
'NYT': 'https://www.nytimes.com/',
'WaPo': 'https://www.washingtonpost.com/',
'BNN': 'http://www.breitbart.com/',
'Gdn': 'https://www.theguardian.com/us',
'Pol': 'https://www.politico.com/',
'ABC': 'http://abcnews.go.com/',
'Huff': 'https://www.huffingtonpost.com/'
}
TRUMPISMS = [
'donald j. trump',
'donald j trump',
'donald trump',
'president',
'trump',
'donald',
'administration'
]
def get_trump_score(url):
'''
Gets the average sentiment values for all Trump articles at the given url
:param url:
The website to crawl for articles
:return:
The entity sentiment analysis score and magnitude. See the Google Cloud API documentation for more info
'''
# Get article text
site = N.build(url, memoize_articles=False)
scores = []
for art in site.articles:
if 'politics' in art.url or 'trump' in art.url.lower():
art.download()
art.parse()
if 'trump' in art.title.lower() or 'trump' in art.text.lower():
scores.append(get_article_score(art.text))
filtered_scores = list(filter(lambda s: s is not None, scores))
if len(filtered_scores) == 0:
return None
sc, mag = np.array(filtered_scores).T
return np.average(sc), np.average(mag)
def get_article_score(text):
'''
Gets the average entity sentiment value for all trumpisms in the given text
:param text:
A long string to analyze using Google cloud api entity sentiment analysis
:return:
An entity score (from -1 to 1) and magnitude. -1 is most negative and 1 is most positive
'''
# Instantiates a client
client = language.LanguageServiceClient()
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
try:
result = client.analyze_entity_sentiment(document=document)
except Exception:
return None
sents = []
for entity in result.entities:
if entity.name.lower() in TRUMPISMS:
sent = entity.sentiment
if sent.score != 0 or sent.magnitude != 0:
sents.append((sent.score, sent.magnitude))
if len(sents) == 0:
return None
score, mag = np.array(sents).T
return np.average(score), np.average(mag)
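# Minimal standalone usage sketch (the article text below is made up, and the Google Cloud
# credentials are assumed to be configured via GOOGLE_APPLICATION_CREDENTIALS):
#
#     result = get_article_score("President Trump signed an executive order today ...")
#     if result is not None:
#         score, mag = result
#         print('mean entity sentiment: %.2f (magnitude %.2f)' % (score, mag))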
if __name__ == '__main__':
import matplotlib.pyplot as plt
from seaborn import distplot, barplot
if os.path.isfile('data/site_data.csv'):
print('Data file found')
data = pd.read_csv('data/site_data.csv')
else:
print('Calculating new data')
output = list(map(get_trump_score, SITES.values()))
mask = list(map(lambda s: s is not None, output))
scores, mags = list(zip(*np.array(output)[mask]))
sites = np.array(list(SITES))[mask]
data = | pd.DataFrame() | pandas.DataFrame |
## lsdmap_knickpointplotting.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools for analysing and plotting knickpoint data
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## FJC
## 05/06/2017
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import colors
import lsdplottingtools.lsdmap_pointtools as LSDMap_PD
import matplotlib.pyplot as plt
import time as clock
from matplotlib import rcParams
import matplotlib.cm as cm
from lsdmapfigure.plottingraster import MapFigure
from lsdmapfigure.plottingraster import BaseRaster
from lsdmapfigure import plottinghelpers as Helper
from lsdplottingtools import colours as lsdcolours
from lsdplottingtools import statsutilities as SUT
from lsdplottingtools import init_plotting_DV
from matplotlib.patches import Rectangle
from matplotlib.ticker import MaxNLocator
from lsdviztools.lsdplottingtools import lsdmap_pointtools as LSDP
from lsdviztools.lsdplottingtools import lsdmap_vectortools as LSDV
import sys
import os
import pandas as pd
from scipy.stats import norm
pd.options.mode.chained_assignment = None
class KP_plotting(object):
"""
This class is a development version of the knickpoint plotting tools.
Its aim is to handle knickpoint picking via python code; it is still under active
development and the methods change relatively often.
B.G.
"""
def __init__(self, fpath,fprefix, basin_key = [], source_key = [], min_length = 0, cut_off_val = [0,0,0,0], main_stem = False, normalisation = None, size_kp = [],coeff_size = 1):
"""
This creates a knickpoint object to help with plotting. One object lets you select particular rivers and/or basins, provides some tools to pre-sort the dataset, and generates
stats figures, profiles and maps. It requires the output of LSDTopoTools.
params:
fpath (str): path to the files generated by LSDTopoTools
fprefix (str): common prefix to all the files
basin_key (list of int): list of basins to extract from the global dataset. All basins are selected if an empty list is provided. WARNING: sources that do not belong to the selected basins won't be selected.
source_key (list of int): list of sources to extract from the global dataset. All sources are selected if an empty list is provided. WARNING: sources that do not belong to the selected basins won't be selected.
min_length (float): you can choose to ignore all rivers shorter than this threshold, to avoid dealing with tiny rivers.
cut_off_val (list of float, or "auto"): cut-off values for the knickpoint magnitudes: [negative_delta_ksn, positive_delta_ksn, negative_delta_segelev, positive_delta_segelev].
for example [-1,2,-2,3] keeps the data where (dksn <= -1 OR dksn >= 2) OR (dsegelev <= -2 OR dsegelev >= 3). "auto" uses quartiles of the dataset.
main_stem (bool): only keep the main stem of each basin
normalisation (str): "relative" sets the outlet of each basin to 0; "absolute" additionally rescales each basin so that its maximum elevation is 1
size_kp (list): leave blank for auto (dataset quartiles), otherwise [min_delta_ksn, min_delta_segelev, max_delta_ksn, max_delta_segelev]: knickpoints whose magnitude is below the minimum all get the smallest marker size, knickpoints above the maximum all get the largest, and everything in between scales gradually. coeff_size multiplies the final sizes.
author: B.G - 2017/2018
"""
print("Let me first preprocess and check your files")
# Loading the attributes
self.fpath = fpath # the path of your file : /home/your/path/
self.fprefix = fprefix # the common prefix of all your files
# Loading the files
print("Loading the knickpoint-related files")
try:
self.df_river = Helper.ReadMChiSegCSV(self.fpath, self.fprefix, type = "knickpoint") # Contains the river info
self.df_rivraw = Helper.ReadMChiSegCSV(self.fpath, self.fprefix, type = "knickpoint") # Contains the river info (will not be thinned by your selection choices)
self.df_kp_raw = Helper.ReadKnickpointCSV(self.fpath, self.fprefix, ftype = "raw") # Contains the raw knickpint info (before TVD or else) -> Debugging purposes
self.df_kp = Helper.ReadKnickpointCSV(self.fpath, self.fprefix) # Contains the knickpoint location and informations
self.df_SK = Helper.readSKKPstats(self.fpath, self.fprefix) # Contains few metrics per river keys
self.df_kp_ksn = self.df_kp[self.df_kp["delta_ksn"] != 0]
self.df_kp_stepped = self.df_kp[self.df_kp["delta_segelev"] > 0]
except IOError:
print("I didnae find your knickpoint related files make sure that:")
print("- You ran the knickpoint analysis")
print("- Your path is good and finishing by '/' e.g. /home/name/kazakhstan/ ")
print("- Check your writing path in your param file used with chi_mapping_tool.exe, a wrong path won't trigger error but won't write files.")
quit()
print("Managing the data:")
if(normalisation != None):
self.normalise_elevation(method = normalisation)
if(isinstance(cut_off_val,str) and (cut_off_val == "auto")):
cut_off_val = [self.df_kp_ksn["delta_ksn"].quantile(0.25),self.df_kp_ksn["delta_ksn"].quantile(0.75),-10000,self.df_kp_stepped["delta_segelev"].quantile(0.75)]
if(cut_off_val != [0,0,0,0]):
print("I am selecting your knickpoints")
# This selection process is a bit messy, but really efficient with pandas!
print(cut_off_val)
self.df_kp = self.df_kp[((self.df_kp["delta_ksn"] <= cut_off_val[0]) | (self.df_kp["delta_ksn"] >= cut_off_val[1])) | ((self.df_kp["delta_segelev"] <= cut_off_val[2]) | (self.df_kp["delta_segelev"] >= cut_off_val[3]))]
self.df_kp_ksn = self.df_kp[((self.df_kp["delta_ksn"] <= cut_off_val[0]) | (self.df_kp["delta_ksn"] >= cut_off_val[1]))]
self.df_kp_stepped = self.df_kp[((self.df_kp["delta_segelev"] <= cut_off_val[2]) | (self.df_kp["delta_segelev"] >= cut_off_val[3]))]
# Selection of Basins and sources
if(basin_key == []):
print("All the basins are selected:")
print(self.df_SK["basin_key"].unique().tolist())
else:
print("You selected the following basins:")
print(basin_key)
self.df_river = self.df_river[self.df_river["basin_key"].isin(basin_key)]
self.df_kp_raw = self.df_kp_raw[self.df_kp_raw["basin_key"].isin(basin_key)]
self.df_kp = self.df_kp[self.df_kp["basin_key"].isin(basin_key)]
self.df_SK = self.df_SK[self.df_SK["basin_key"].isin(basin_key)]
self.df_kp_ksn = self.df_kp_ksn[self.df_kp_ksn["basin_key"].isin(basin_key)]
self.df_kp_stepped = self.df_kp_stepped[self.df_kp_stepped["basin_key"].isin(basin_key)]
if(source_key == [] and min_length == 0):
print("All the sources are selected:")
print(self.df_SK["source_key"].unique().tolist())
elif(min_length > 0):
print("Let me remove the river smaller than " +str(min_length))
self.df_SK = self.df_SK[self.df_SK["length"]>min_length]
source_key = self.df_SK["source_key"].unique()
self.df_river = self.df_river[self.df_river["source_key"].isin(source_key)]
self.df_kp_raw = self.df_kp_raw[self.df_kp_raw["source_key"].isin(source_key)]
self.df_kp = self.df_kp[self.df_kp["source_key"].isin(source_key)]
self.df_SK = self.df_SK[self.df_SK["source_key"].isin(source_key)]
self.df_kp_ksn = self.df_kp_ksn[self.df_kp_ksn["source_key"].isin(source_key)]
self.df_kp_stepped = self.df_kp_stepped[self.df_kp_stepped["source_key"].isin(source_key)]
print("You selected the following Sources: ")
print(source_key)
else:
print("You selected the following Sources: ")
print(source_key)
self.df_river = self.df_river[self.df_river["source_key"].isin(source_key)]
self.df_kp_raw = self.df_kp_raw[self.df_kp_raw["source_key"].isin(source_key)]
self.df_kp = self.df_kp[self.df_kp["source_key"].isin(source_key)]
self.df_SK = self.df_SK[self.df_SK["source_key"].isin(source_key)]
self.df_kp_ksn = self.df_kp_ksn[self.df_kp_ksn["source_key"].isin(source_key)]
self.df_kp_stepped = self.df_kp_stepped[self.df_kp_stepped["source_key"].isin(source_key)]
if(main_stem):
print("Wait, you just want the main stem, let me deal with that")
source_key = []
for bas in self.df_SK["basin_key"].unique():
TSK = self.df_SK[self.df_SK["basin_key"] == bas]
ts = TSK["source_key"][TSK["length"] == TSK["length"].max()].values[0]
source_key.append(ts)
self.df_river = self.df_river[self.df_river["source_key"].isin(source_key)]
self.df_kp_raw = self.df_kp_raw[self.df_kp_raw["source_key"].isin(source_key)]
self.df_kp = self.df_kp[self.df_kp["source_key"].isin(source_key)]
self.df_SK = self.df_SK[self.df_SK["source_key"].isin(source_key)]
self.df_kp_ksn = self.df_kp_ksn[self.df_kp_ksn["source_key"].isin(source_key)]
self.df_kp_stepped = self.df_kp_stepped[self.df_kp_stepped["source_key"].isin(source_key)]
print("final source_keys are: ")
print(source_key)
#### Now dealing with the size of knickpoints on map/profile.
# By default I am setting the minimum size to the 1st quartile and the maximum to the 3rd quartile
if(len(size_kp)!= 4):
size_kp.append(self.df_kp_ksn["delta_ksn"].abs().quantile(0.25)) # min val for sizing dksn kps = every knickpoints below this value will have the same (minimum) size
size_kp.append(self.df_kp_ksn["delta_segelev"].abs().quantile(0.25)) # MIN VALUE FOR STEPPED KNICKPOINT IS under work
size_kp.append(self.df_kp_ksn["delta_ksn"].abs().quantile(0.75)) # max val for sizing dksn kps = every knickpoints below this value will have the same (maximum) size
size_kp.append(self.df_kp_ksn["delta_segelev"].abs().quantile(0.75)) # MAX VALUE FOR STEPPED KNICKPOINT IS under work
print("SIZE GLOBAL WARNING :Automatically sizing your knickpoint range: all knikcpoints below %s will have the minimum size and all knickpoints above %s the maximum in absolute values." %(size_kp[0],size_kp[2]))
# applying the size column
## I Normalize the size
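# Below, |delta_ksn| is clipped to the [size_kp[0], size_kp[2]] range, normalised by its maximum,
# shifted so that the smallest marker has size `minsize`, and finally multiplied by coeff_size.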
minsize = 0.2
self.df_kp_ksn["size_kp"] = pd.Series(data = self.df_kp_ksn["delta_ksn"].abs(), index = self.df_kp_ksn.index)
## Recasting the knickpoints into a range (everything below a threshold will have the same minimum value and above another threshold another maximum value)
self.df_kp_ksn["size_kp"][self.df_kp_ksn["delta_ksn"].abs() <= size_kp[0]] = size_kp[0]
self.df_kp_ksn["size_kp"][self.df_kp_ksn["delta_ksn"].abs() >= size_kp[2]] = size_kp[2]
## Applying a coeff
# self.df_kp_ksn["size_kp"] += 0.01
self.df_kp_ksn["size_kp"] = self.df_kp_ksn["size_kp"] /self.df_kp_ksn["size_kp"].max()
self.df_kp_ksn["size_kp"] = self.df_kp_ksn["size_kp"] - self.df_kp_ksn["size_kp"].min() + minsize
self.df_kp_ksn["size_kp"] *= coeff_size
# plt.hist(self.df_kp_ksn["size_kp"].values, bins = 35)
# plt.savefig("TESTHIST.png")
# quit()
# Same the general dataset
self.df_kp["size_kp"] = pd.Series(data = self.df_kp["delta_ksn"].abs(), index = self.df_kp.index)
self.df_kp["size_kp"][self.df_kp["delta_ksn"].abs() <= size_kp[0]] = size_kp[0]
self.df_kp["size_kp"][self.df_kp["delta_ksn"].abs() >= size_kp[2]] = size_kp[2]
# self.df_kp["size_kp"] += 0.01
self.df_kp["size_kp"] = self.df_kp["size_kp"]/self.df_kp["size_kp"].max()
self.df_kp["size_kp"] = self.df_kp["size_kp"] - self.df_kp["size_kp"].min() +minsize
self.df_kp["size_kp"] *= coeff_size
# applying the size column to the step
## I Normalize the size
self.df_kp_stepped["size_kp_step"] = pd.Series(data = self.df_kp_stepped["delta_segelev"].abs(), index = self.df_kp_stepped.index)
## Recasting the knickpoints into a range (everything below a threshold will have the same minimum value and above another threshold another maximum value)
# self.df_kp_stepped["size_kp_step"][self.df_kp_stepped["delta_segelev"].abs() <= size_kp[1]] = size_kp[1]
self.df_kp_stepped["size_kp_step"][self.df_kp_stepped["delta_segelev"].abs() >= size_kp[3]] = size_kp[3]
## Applying a coeff
# self.df_kp_stepped["size_kp_step"] += 0.01
self.df_kp_stepped["size_kp_step"] = self.df_kp_stepped["size_kp_step"]/self.df_kp_stepped["size_kp_step"].max()
self.df_kp_stepped["size_kp_step"] = self.df_kp_stepped["size_kp_step"] - self.df_kp_stepped["size_kp_step"].min() + minsize
self.df_kp_stepped["size_kp_step"] *= coeff_size
# Same the general dataset
self.df_kp["size_kp_step"] = pd.Series(data = self.df_kp["delta_segelev"].abs(), index = self.df_kp.index)
# self.df_kp["size_kp_step"][self.df_kp["delta_segelev"].abs() <= size_kp[1]] = size_kp[1]
self.df_kp["size_kp_step"][self.df_kp["delta_segelev"].abs() >= size_kp[3]] = size_kp[3]
# self.df_kp["size_kp_step"] += 0.01
self.df_kp["size_kp_step"] =self.df_kp["size_kp_step"]/self.df_kp["size_kp_step"].max()
self.df_kp["size_kp_step"] = self.df_kp["size_kp_step"] - self.df_kp["size_kp_step"].min() + minsize
self.df_kp["size_kp_step"] *= coeff_size
# Just getting rid of few NoData
self.df_river["m_chi"][self.df_river["m_chi"] == -9999] = 0
print("Min dksn: %s - max dksn: %s - min dseg: %s - max dseg: %s" %(self.df_kp["delta_ksn"].min(),self.df_kp["delta_ksn"].max(),self.df_kp["delta_segelev"].min(), self.df_kp["delta_segelev"].max()))
print("After all the thinning process, it remains %s dksn knickpoints, and %s dsegelev knickpoints" %(self.df_kp_ksn.shape[0],self.df_kp_stepped.shape[0]))
print("Done now")
######################################################################################################################
####################### A first set of general functions to prepare the data/figures #################################
######################################################################################################################
# .
# /\ /l
# ((.Y(!
# \ |/
# / 6~6,
# \ _ +-.
# \`-=--^-'
# \ \
# _/ \
# ( . Y
# /"\ `--^--v--.
# / _ `--"T~\/~\/
# / " ~\. !
# _ Y Y./'
# Y^| | |~~7
# | l | / ./'
# | `L | Y .^/~T
# | l ! | |/| |
# | .`\/' | Y | !
# l "~ j l j_L______
# \,____{ __"~ __ ,\_,\_
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_fig_right_size(self, size = "esurf", n_axis =1 ,facecolor = 'white'):
"""
return a matplotlib fig object presized
param:
size = size code (esurf,geomorphology,big or tuple/array of size)
n_axis: number of axis
"""
# Checking the type of input
if (isinstance(size,str)):
if size.lower() == "geomorphology":
fig = plt.figure(n_axis, facecolor = facecolor, figsize=(6.25,3.5))
elif size.lower() == "big":
fig = plt.figure(n_axis, facecolor = facecolor, figsize=(16,9))
elif size.lower() == "esurf":
fig = plt.figure(n_axis, facecolor = facecolor, figsize=(4.92126,3.5))
else:
print("I did not understood your format input (%s), I am defaulting to esurf." %(size))
fig = plt.figure(n_axis, facecolor = facecolor, figsize=(4.92126,3.5))
if ((isinstance(size,tuple)) or (isinstance(size,list))):
if len(size) == 2:
fig = plt.figure(n_axis, facecolor = facecolor, figsize=(size[0], size[1]))
else:
print("I did not understood your format input (%s), I am defaulting to esurf." %(size))
fig = plt.figure(n_axis, facecolor = facecolor, figsize=(4.92126,3.5), dpi = 600)
return fig
def get_figwidth_right_size(self, size = "esurf"):
"""
return the width (in inches) of a figure for the given size code
param:
size = size code (esurf,geomorphology,big or tuple/array of size)
"""
# Checking the type of input
if (isinstance(size,str)):
if size.lower() == "geomorphology":
wsize = 6.25
elif size.lower() == "big":
wsize = 16
elif size.lower() == "esurf":
wsize = 4.92126
else:
print("I did not understood your format input (%s), I am defaulting to esurf." %(size))
wsize = 4.92126
if ((isinstance(size,tuple)) or (isinstance(size,list))):
if len(size) == 2:
wsize = size[0]
else:
print("I did not understood your format input (%s), I am defaulting to esurf." %(size))
wsize = 4.92126
return wsize
def normalise_elevation(self,method = "relative"):
"""
Normalise the elevation to the outlet of the basin in a relative way (outlet = 0 and elevation = old elevation - outlet elevation) or
absolute way: outlet = 0 and maximum elevation = 1
"""
for bas in self.df_SK["basin_key"].unique():
norm_elev = self.df_river["elevation"][self.df_river["basin_key"] == bas].min()
self.df_river["elevation"][self.df_river["basin_key"] == bas] -= norm_elev
self.df_rivraw["elevation"][self.df_rivraw["basin_key"] == bas] -= norm_elev
self.df_kp_raw["elevation"][self.df_kp_raw["basin_key"] == bas] -= norm_elev
self.df_kp["elevation"][self.df_kp["basin_key"] == bas] -= norm_elev
self.df_kp_ksn["elevation"][self.df_kp_ksn["basin_key"] == bas] -= norm_elev
self.df_kp_stepped["elevation"][self.df_kp_stepped["basin_key"] == bas] -= norm_elev
if(method == "absolute"):
norm_elev = self.df_river["elevation"][self.df_river["basin_key"] == bas].max()
self.df_river["elevation"][self.df_river["basin_key"] == bas] /= norm_elev
self.df_rivraw["elevation"][self.df_rivraw["basin_key"] == bas] /= norm_elev
self.df_kp_raw["elevation"][self.df_kp_raw["basin_key"] == bas] /= norm_elev
self.df_kp["elevation"][self.df_kp["basin_key"] == bas] /= norm_elev
self.df_kp_ksn["elevation"][self.df_kp_ksn["basin_key"] == bas] /= norm_elev
self.df_kp_stepped["elevation"][self.df_kp_stepped["basin_key"] == bas] /= norm_elev
######################################################################################################################
########################################### The plotting figures #####################################################
######################################################################################################################
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$**$$$$$$$$$**$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$" ^$$$$$$F *$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$ z$$$$$$L ^$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$ e$$$$$$$$$e J$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$eee$$$$$$$$$$$$$e$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$b$$$$$$$$$$$$$$$$$$*$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$)$$$$P"e^$$$F$r*$$$$F"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$d$$$$ "z$$$$" $$$$% $3$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$*"""*$$$ .$$$$$$ z$$$* ^$e*$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$" *$$ee$$$$$$$$$$*" $$$C$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$. "***$$"*"$$"" $$$$e*$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$b "$b.$$" $$$$$b"$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$c. """ $$$$$$$^$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$e.. $$$$$$$$^$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$eeee.. J$$$$$$$$b"$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$r z$$$$$$$$$$r$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$" z$$$$$**$$$$$^$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$*" z$$$P" ^*$$$ $$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$*" .d$$$$ $$$ $$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$" .e$$$$$F 3$$ $$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$. .d$$$$$$$ $PJ$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$eeeeeeed$*""""**"" $\$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $d$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$. $$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$e. d$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$eeeeeee$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def print_ksn_profile(self,size = "big", format = "png", x_axis = "chi", y_axis = "m_chi" , knickpoint = True, title = "none", label_size = 8, facecolor = 'white',
size_of_ksn = 4, legend = True, size_of_TVD_ksn = 3):
"""
Prints a plot, for each selected source key, of the ksn value as a function of chi or flow distance.
param:
size: the size of the figure, default big.
format: format of the output: "png", "svg" or "show".
x_axis: The coordinates to print the data: "chi" or "flow distance"
knickpoint: True or False to display it or not.
title: "none" for no title, "auto" for displaying the source key.
size_of_ksn: size of the m_chi (ksn) points before processing
legend: if True, will plot the legend
Author: B.G. - 25/01/2018
"""
# check if a directory exists for the chi plots. If not then make it.
out_directory = self.fpath+'river_plots/'
if not os.path.isdir(out_directory):
print("I am creating the river_plot/ directory to save your figures")
os.makedirs(out_directory)
# Adjust the corrected y_axis
if(y_axis == "m_chi"):
corrected_y_axis = "TVD_ksn"
y_kp = "delta_ksn"
ylab = r"$k_{sn}$"
elif(y_axis == "b_chi"):
corrected_y_axis = "TVD_b_chi"
y_kp = "delta_segelev"
ylab = r"$b_{\chi}$"
elif(y_axis == "segmented_elevation"):
y_axis = "segdrop"
corrected_y_axis = "TVD_elevation"
y_kp = "delta_segelev"
ylab = r"$elevation$"
else:
print("Non valid y-axis it has to be b_chi or m_chi (= ksn)")
quit()
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Liberation Sans'] # Liberation Sans is a free alternative to Arial. Albeit being quite universal, Arial is proprietary. #PRAISE_FREE_AND_OPENSOURCE
rcParams['font.size'] = label_size
for sources in self.df_SK["source_key"].unique():
# Select the data
this_df_SK = self.df_SK[self.df_SK["source_key"] == sources]
this_df_kp = self.df_kp[self.df_kp["source_key"] == sources]
this_df_kp = this_df_kp[this_df_kp["out"] == 1]
this_df_kp_raw = self.df_kp_raw[self.df_kp_raw["source_key"] == sources]
this_df_river = self.df_river[self.df_river["source_key"] == sources]
# Create a figure with required dimensions
n_axis = 1
fig = self.get_fig_right_size(size = size, n_axis =1 , facecolor = facecolor)
# create the axis using the gridspec method
gs = plt.GridSpec(100,100,bottom=0.15,left=0.10,right=0.90,top=0.95)
ax2 = fig.add_subplot(gs[0:100,0:100], facecolor = "None")
gs = plt.GridSpec(100,100,bottom=0.15,left=0.10,right=0.90,top=0.95)
ax1 = fig.add_subplot(gs[0:100,0:100], facecolor = "None")
# plotting the ksn
## not processed (in the back and quite transparent)
ax1.scatter(this_df_river[x_axis],this_df_river[y_axis], s = size_of_ksn, c = "r", lw =0, alpha = 0.3, label = "ksn (before TVD)")
ax1.scatter(this_df_river[x_axis],this_df_river[corrected_y_axis], s = size_of_TVD_ksn, c ="k", lw =0, alpha = 1, label = "ksn (TVD)")
## Getting the extents of this first plot to apply it to the knickpoint one
this_xlim = ax1.get_xlim()
# Label
if(x_axis == "chi"):
xlab = r"$\chi$"
elif(x_axis == "flow_distance"):
xlab = "Distance from the outlet (m)"
ax1.set_xlabel(xlab)
ax1.set_ylabel(ylab)
if(knickpoint):
this_df_kp_pos = this_df_kp[this_df_kp[y_kp]>0]
this_df_kp_neg = this_df_kp[this_df_kp[y_kp]<0]
ax2.scatter(this_df_kp_pos[x_axis], this_df_kp_pos[y_kp], marker = "s", s = 5, c = "#E79A00")
ax2.scatter(this_df_kp_neg[x_axis], this_df_kp_neg[y_kp], marker = "s", s = 5, c = "#2939FF")
# Adapting the extents
ax2.set_xlim(this_xlim)
# Title
if(title.lower() == "auto"):
this_title = "source %s" %(sources)
elif(title.lower() != "none"):
this_title = title
if(title.lower() != "none"):
extra = ax1.add_patch(Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0, label = this_title))
# Legend
ax1.legend(loc = 0) # 1 = upper right 0 - best choice
ax2.xaxis.set_visible(False)
if(knickpoint):
ax2.yaxis.set_ticks_position("right")
ax2.yaxis.set_label_position("right")
ax2.set_ylabel(r"$Knickpoint \/ \Delta k_{sn}$") # the \/ add a space in between, the mathematical expression compiling can screw this a bit
else:
ax2.yaxis.set_visible(False)
# Saving the figure
plt.savefig(out_directory + self.fprefix+"_ksn_source_%s_%s.%s"%(sources,y_axis,format), dpi = 500)
plt.clf()
# switching to the next figure
# End of this function
def print_river_profile(self,size = "big", format = "png", x_axis = "chi", knickpoint = True, title = "none", label_size = 8, facecolor = 'white',
size_of_river = 0.5, legend = True, size_of_TVD_ksn = 3, up_set = 40, kalib = False, binning = "source_key", print_seg_elev = False, size_recasting = []):
"""
"""
# check if a directory exists for the chi plots. If not then make it.
out_directory = self.fpath+'river_plots/'
if not os.path.isdir(out_directory):
print("I am creating the river_plot/ directory to save your figures")
os.makedirs(out_directory)
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Liberation Sans'] # Liberation Sans is a free alternative to Arial. Albeit being quite universal, Arial is proprietary. #PRAISE_FREE_AND_OPENSOURCE
rcParams['font.size'] = label_size
for sources in self.df_SK[binning].unique():
# Select the data
this_df_SK = self.df_SK[self.df_SK[binning] == sources]
this_df_kp_ksn = self.df_kp_ksn[self.df_kp_ksn[binning] == sources]
this_df_kp_stepped = self.df_kp_stepped[self.df_kp_stepped[binning] == sources]
this_df_dksn_pos = this_df_kp_ksn[this_df_kp_ksn["sign"] == 1]
this_df_dksn_neg = this_df_kp_ksn[this_df_kp_ksn["sign"] == -1]
this_df_dsegelev_pos = this_df_kp_stepped[this_df_kp_stepped["delta_segelev"]> 0]
this_df_kp_raw = self.df_kp_raw[self.df_kp_raw[binning] == sources]
this_df_river = self.df_river[self.df_river[binning] == sources]
# Dealing with the knickpoint offset
up_set = (this_df_river["elevation"].max() - this_df_river["elevation"].min())*0.1
if(this_df_kp_ksn.shape[0]> 0 or this_df_dsegelev_pos.shape[0] > 0):
# Create a figure with required dimensions
n_axis = 1
fig = self.get_fig_right_size(size = size, n_axis =1 , facecolor = facecolor)
gs = plt.GridSpec(100,100,bottom=0.15, left=0.10, right=0.95, top=0.95)
ax1 = fig.add_subplot(gs[0:100,0:100], facecolor = "None")
#plot the long/Chi profile
ax1.scatter(this_df_river[x_axis], this_df_river["elevation"], lw =0 , s = size_of_river, c = "#174D9C" , zorder = 3)
# Plotting the river nodes that do contain a knickpoint, to check the combining
ax1.scatter(this_df_river[x_axis][this_df_river["ksnkp"]!=0], this_df_river["elevation"][this_df_river["ksnkp"]!=0], lw =0 , s = size_of_river, c = this_df_river["ksnkp"][this_df_river["ksnkp"]!=0], cmap = "RdBu_r" , zorder = 4)
# this can be printed to adapt the ksn extraction parameters from Mudd et al. 2014
if (print_seg_elev):
cb1 = ax1.scatter(this_df_river[x_axis], this_df_river["segmented_elevation"], lw =0 , s = size_of_river/2, c = "k", alpha = 0.5, zorder = 3)
# Plot the dksn knickpoints
## First normalized the size
# sizing = self.df_kp_ksn.copy()
# sizing["delta_ksn"] = sizing["delta_ksn"].abs()
# sizing = sizing["delta_ksn"][sizing[binning] == sources].values
# if(len(size_recasting) == 2):
# sizing[sizing<size_recasting[0]] = size_recasting[0]
# sizing[sizing>size_recasting[1]] = size_recasting[1]
try:
# sizing = sizing/np.max(sizing)
# sizing += 0.01
# sizing = sizing * coeff_size
## plot the triangles
ax1.scatter(this_df_dksn_pos[x_axis], this_df_dksn_pos["elevation"] + up_set, s = this_df_dksn_pos["size_kp"], lw = 0, marker = "^", c = "r", alpha = 0.95, zorder = 5)
ax1.scatter(this_df_dksn_neg[x_axis], this_df_dksn_neg["elevation"] + up_set, s = this_df_dksn_neg["size_kp"], lw = 0, marker = "v", c = "b", alpha = 0.95, zorder = 5)
## plot the contours
ax1.scatter(this_df_dksn_pos[x_axis], this_df_dksn_pos["elevation"] + up_set, s = this_df_dksn_pos["size_kp"], lw = 0.5, marker = "^", facecolor = "none", edgecolor = "k", alpha = 0.95, zorder = 5)
ax1.scatter(this_df_dksn_neg[x_axis], this_df_dksn_neg["elevation"] + up_set, s = this_df_dksn_neg["size_kp"], lw = 0.5, marker = "v", facecolor = "none", edgecolor = "k", alpha = 0.95, zorder = 5)
except ValueError:
print("No ksn knickpoint on source " + str(sources))
# Plot the dsegelev (stepped) knickpoints
## First normalized the size
# size_pos = this_df_dsegelev_pos["delta_segelev"]/this_df_kp_stepped["delta_segelev"].max()*3
## plot the bars
ax1.scatter(this_df_dsegelev_pos[x_axis], this_df_dsegelev_pos["elevation"] - up_set, s = 10, lw = 1.5, marker = "|", c = "#CB9A00", alpha = 0.95, zorder = 5)
# Plot vertical bars in between
ax1.vlines(this_df_dksn_neg[x_axis], this_df_dksn_neg["elevation"], this_df_dksn_neg["elevation"] + up_set, zorder = 1, lw = 0.15 )
ax1.vlines(this_df_dksn_pos[x_axis], this_df_dksn_pos["elevation"], this_df_dksn_pos["elevation"] + up_set, zorder = 1, lw = 0.15 )
ax1.vlines(this_df_dsegelev_pos[x_axis], this_df_dsegelev_pos["elevation"] - up_set, this_df_dsegelev_pos["elevation"], zorder = 1, lw = 0.15 )
if(kalib):
kal = pd.read_csv("/home/s1675537/PhD/LSDTopoData/knickpoint/test_location_paper/Smugglers_SC/field_kp/calib_jointed.csv")
kal = kal[kal[binning] == sources]
colaray = kal["type"].values
colaray[colaray == "bases"] = "#A002D3"
colaray[colaray == "lips"] = "#57B300"
ax1.scatter(kal[x_axis],kal["elevation"], marker = "x", s = 7, lw = 0.4, zorder = 2, c = colaray)
if(x_axis == "chi"):
ax1.set_xlabel(r"$\chi$ (m)")
else:
ax1.set_xlabel("Distance from the outlet (m)")
ax1.set_ylabel("z (m)")
# Title
if(title.lower() == "auto"):
this_title = "source %s" %(sources)
elif(title.lower() != "none"):
this_title = title
if(title.lower() != "none"):
extra = ax1.add_patch(Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0, label = this_title))
ax1.legend([extra],[this_title], loc = 0) # 1 = upper right 0 - best choice
# Saving the figure
plt.savefig(out_directory + self.fprefix + "_%s_%s_%s.%s"%(binning,sources,x_axis,format), dpi = 500)
plt.clf()
# switching to the next figure
def print_classic_basin_profile(self,size = "big", format = "png", x_axis = "chi", knickpoint = True, label_size = 8, facecolor = 'white',
size_of_river = 0.5, kalib = False, binning = "basin_key", print_seg_elev = False, size_recasting = [], neg = True, pos = False, step = False):
"""
This function prints a basic version of a basin-wide river profile, displaying the knickpoints in a "classical" way: as important concavity-to-convexity changes.
"""
# check if a directory exists for the chi plots. If not then make it.
out_directory = self.fpath+'river_plots/'
if not os.path.isdir(out_directory):
print("I am creating the river_plot/ directory to save your figures")
os.makedirs(out_directory)
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Liberation Sans'] # Liberation Sans is a free alternative to Arial. Albeit being quite universal, Arial is proprietary. #PRAISE_FREE_AND_OPENSOURCE
rcParams['font.size'] = label_size
for sources in self.df_SK[binning].unique():
# Select the data
this_df_SK = self.df_SK[self.df_SK[binning] == sources]
this_df_kp_ksn = self.df_kp_ksn[self.df_kp_ksn[binning] == sources]
this_df_kp_stepped = self.df_kp_stepped[self.df_kp_stepped[binning] == sources]
this_df_dksn_pos = this_df_kp_ksn[this_df_kp_ksn["sign"] == 1]
this_df_dksn_neg = this_df_kp_ksn[this_df_kp_ksn["sign"] == -1]
this_df_dsegelev_pos = this_df_kp_stepped[this_df_kp_stepped["delta_segelev"]> 0]
this_df_kp_raw = self.df_kp_raw[self.df_kp_raw[binning] == sources]
this_df_river = self.df_river[self.df_river[binning] == sources]
if(this_df_kp_ksn.shape[0]> 0 or this_df_dsegelev_pos.shape[0] > 0):
# Create a figure with required dimensions
n_axis = 1
fig = self.get_fig_right_size(size = size, n_axis =1 , facecolor = facecolor)
gs = plt.GridSpec(100,100,bottom=0.15, left=0.10, right=0.95, top=0.95)
ax1 = fig.add_subplot(gs[0:100,0:100], facecolor = "None")
#plot the long/Chi profile
ax1.scatter(this_df_river[x_axis], this_df_river["elevation"], lw =0 , s = size_of_river, c = "#174D9C" , zorder = 3, label = "")
# Plotting the river nodes that do contain a knickpoint, to check the combining
# ax1.scatter(this_df_river[x_axis][this_df_river["ksnkp"]!=0], this_df_river["elevation"][this_df_river["ksnkp"]!=0], lw =0 , s = size_of_river, c = this_df_river["ksnkp"][this_df_river["ksnkp"]!=0], cmap = "RdBu_r" , zorder = 4)
# this can be printed to adapt the ksn extraction parameters from Mudd et al. 2014
if (print_seg_elev):
cb1 = ax1.scatter(this_df_river[x_axis], this_df_river["segmented_elevation"], lw =0 , s = size_of_river/2, c = "k", alpha = 0.5, zorder = 3, label = "")
try:
## plot the triangles
if(neg):
ax1.scatter(this_df_dksn_neg[x_axis], this_df_dksn_neg["elevation"], s = this_df_dksn_neg["size_kp"], lw = 0, marker = "o", c = "#00F1EA", alpha = 0.95, zorder = 5, label = r"-$\Delta k_{sn}$")
ax1.scatter(this_df_dksn_neg[x_axis], this_df_dksn_neg["elevation"], s = this_df_dksn_neg["size_kp"], lw = 0.5, marker = "o", facecolor = "none", edgecolor = "k", alpha = 0.90, zorder = 5, label = "")
## plot the contours
if(pos):
ax1.scatter(this_df_dksn_pos[x_axis], this_df_dksn_pos["elevation"], s = this_df_dksn_pos["size_kp"], lw = 0, marker = "o", c = "r", alpha = 0.95, zorder = 5, label = r"+$\Delta k_{sn}$")
ax1.scatter(this_df_dksn_pos[x_axis], this_df_dksn_pos["elevation"], s = this_df_dksn_pos["size_kp"], lw = 0.5, marker = "o", facecolor = "none", edgecolor = "k", alpha = 0.95, zorder = 5, label = "")
except ValueError:
print("No ksn knickpoint on source " + str(sources))
if(step):
ax1.scatter(this_df_dsegelev_pos[x_axis], this_df_dsegelev_pos["elevation"], s = 50, lw = 0, marker = "s", c = "yellow", alpha = 0.95, zorder = 4.5, label = "Step")
ax1.scatter(this_df_dsegelev_pos[x_axis], this_df_dsegelev_pos["elevation"], s = 50, lw = 0.5, marker = "s", edgecolor = "k",facecolor = "none", alpha = 0.95, zorder = 4.5, label = "")
if(kalib):
kal = pd.read_csv("/home/s1675537/PhD/LSDTopoData/knickpoint/test_location_paper/Smugglers_SC/field_kp/calib_jointed.csv")
kal = kal[kal[binning] == sources]
colaray = kal["type"].values
colaray[colaray == "bases"] = "#A002D3"
colaray[colaray == "lips"] = "#57B300"
ax1.scatter(kal[x_axis],kal["elevation"], marker = "x", s = 7, lw = 0.4, c = colaray, label = "", zorder = 250)
# Cleaning the legend
handles, labels = ax1.get_legend_handles_labels()
thandles= []
tlabels = []
for l in range(len(labels)):
if labels[l] != "":
thandles.append(handles[l])
tlabels.append(labels[l])
ax1.legend(thandles,tlabels)
if(x_axis == "chi"):
ax1.set_xlabel(r"$\chi$ (m)")
else:
ax1.set_xlabel("Distance from the outlet (m)")
ax1.set_ylabel("z (m)")
# Saving the figure
particule = "_classical_%s_%s_%s"%(binning,sources,x_axis)
if(neg):
particule+= "_drop"
if(pos):
particule+= "_raise"
if(step):
particule+= "_step"
plt.savefig(out_directory + self.fprefix+particule + ".%s"%(format), dpi = 500)
plt.clf()
# switching to the next figure
def print_classic_map(self,size = "big", format = "png", black_bg = False, scale_points = True, label_size = 8, size_kp = 20, return_fig = False,
extent_cmap = [], kalib = False,lith_raster = False,cml = None, unicolor_kp = None, size_stepped_kp_map = 1.8, neg = True, pos = False, step = False):
# check if a directory exists for the chi plots. If not then make it.
raster_directory = self.fpath+'raster_plots/'
if not os.path.isdir(raster_directory):
os.makedirs(raster_directory)
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Liberation Sans'] # Liberation Sans is a free alternative to Arial. Albeit being quite universal, Arial is proprietary. #PRAISE_FREE_AND_OPENSOURCE
rcParams['font.size'] = label_size
# set figure sizes based on format
fig_width_inches = self.get_figwidth_right_size(size = size)
# get the rasters
raster_ext = '.bil'
BackgroundRasterName = self.fprefix+raster_ext
HillshadeName = self.fprefix+'_hs'+raster_ext
BasinsName = self.fprefix+'_AllBasins'+raster_ext
# create the map figure
MF = MapFigure(HillshadeName, self.fpath, coord_type="UTM_km", alpha = 0.7)
if(black_bg):
MF.add_drape_image(HillshadeName,self.fpath,colourmap = "gray",alpha=1,colour_min_max = [10000,10001],modify_raster_values=False,old_values=[], new_values=[],NFF_opti = True)
if(lith_raster and cml != None):
color_map_litho = cml
df_litho_size = | pd.read_csv(self.fpath+self.fprefix+"_lithokey.csv") | pandas.read_csv |
# Package import
from __future__ import print_function, division
from warnings import warn
from nilmtk.disaggregate import Disaggregator
import pandas as pd
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from statistics import mean
import os
import time
import argparse
import pickle
import random
import json
from torchsummary import summary
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import torch.utils.data as tud
from torch.utils.data.dataset import TensorDataset
from torch.utils.tensorboard import SummaryWriter
# Fix the random seed to ensure the reproducibility of the experiment
random_seed = 10
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Use cuda or not
USE_CUDA = torch.cuda.is_available()
DEVICE = 'cuda' if USE_CUDA else 'cpu'
class Encoder(nn.Module):
def __init__(self, power_dis_dim, embed_dim = 128, enc_hid_dim = 128, dec_hid_dim = 256):
super(Encoder, self).__init__()
self.embedding = nn.Embedding(power_dis_dim, embed_dim)
self.rnn = nn.GRU(embed_dim, enc_hid_dim, bidirectional = True, batch_first = True)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.dropout = nn.Dropout(0.5)
self.act = nn.Tanh()
def forward(self, mains):
# mains = [batch_size, 1, mains_len]
# embedded = [batch_size, mains_len, embed_dim]
embedded = self.dropout(self.embedding(mains.squeeze(1)))
# enc_output = [batch_size, mains_len, enc_hid_dim * 2], enc_hidden = [2, batch_size, enc_hid_dim]
enc_output, enc_hidden = self.rnn(embedded)
# s [batch_size, dec_hid_dim] = enc_hidden [batch_size, 2 * enc_hid_dim] * W [enc_hid_dim * 2, dec_hid_dim]
s = self.act(self.fc(enc_hidden.contiguous().view(mains.size(0), -1)))
return enc_output, s
class Attention(nn.Module):
def __init__(self, enc_hid_dim = 128, dec_hid_dim = 256):
super(Attention, self).__init__()
self.W_hs = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim, bias = False)
self.v = nn.Linear(dec_hid_dim, 1, bias = False)
self.act = nn.Tanh()
def forward(self, s, enc_output):
# s = [batch_size, dec_hid_dim], enc_output = [batch_size, mains_len, enc_hid_dim * 2]
batch_size, mains_len = enc_output.size(0), enc_output.size(1)
# repeat decoder hidden state mains_len times, so s = [batch_size, mains_len, dec_hid_dim]
# print(s.size())
s = s.unsqueeze(1).repeat(1, mains_len, 1)
# E [batch_size, mains_len, dec_hid_dim] = h_s [batch_size, mains_len, dec_hid_dim + enc_hid_dim * 2] * W_hs[dec_hid_dim + enc_hid_dim * 2, dec_hid_dim]
E = self.act(self.W_hs(torch.cat((s, enc_output), dim = 2)))
# attention = [batch_size, mains_len]
attention = self.v(E).squeeze(2)
return F.softmax(attention, dim = 1)
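# i.e. the attention weights are a_t = softmax_t( v^T tanh(W_hs [s ; h_t]) ), computed over all
# encoder time steps t (additive, Bahdanau-style attention).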
class Decoder(nn.Module):
def __init__(self, power_dis_dim, attention, enc_hid_dim = 128, dec_hid_dim = 256):
super(Decoder, self).__init__()
self.power_dis_dim = power_dis_dim
self.attention = attention
self.rnn = nn.GRU(enc_hid_dim * 2, dec_hid_dim, batch_first = True)
self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, power_dis_dim)
self.dropout = nn.Dropout(0.5)
def forward(self, enc_output, s):
# enc_output = [batch_size, mains_len, enc_hid_dim * 2], s = [batch_size, dec_hid_dim]
# a = [batch_size, 1, mains_len]
a = self.attention(s, enc_output).unsqueeze(1)
# c = [batch_size, 1, enc_hid_dim * 2]
c = torch.bmm(a, enc_output)
# dec_output = [batch_size, 1, dec_hid_dim], dec_hidden = [1, batch_size, dec_hid_dim]
dec_output, dec_hidden = self.rnn(c, s.unsqueeze(0))
# dec_output = [batch_size, dec_hid_dim], c = [batch_size, enc_hid_dim * 2]
dec_output, c = dec_output.squeeze(1), c.squeeze(1)
# pred = [batch_size, power_dis_dim]
pred = self.fc_out(torch.cat((dec_output, c),dim = 1))
return pred, dec_hidden.squeeze(0)
def initialize(layer):
if isinstance(layer, nn.LSTM):
# Xavier_uniform will be applied to W_{ih}, Orthogonal will be applied to W_{hh}, to be consistent with Keras and Tensorflow
torch.nn.init.xavier_uniform_(layer.weight_ih_l0.data)
torch.nn.init.orthogonal_(layer.weight_hh_l0.data)
torch.nn.init.constant_(layer.bias_ih_l0.data, val = 0.0)
torch.nn.init.constant_(layer.bias_hh_l0.data, val = 0.0)
elif isinstance(layer, nn.Linear):
# Xavier_uniform will be applied to conv1d and dense layer, to be consistent with Keras and Tensorflow
torch.nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias.data, val = 0.0)
class Seq2Seq_Pytorch(nn.Module):
def __init__(self, encoder, decoder, device = DEVICE):
# Refer to "<NAME>, <NAME>, <NAME>, et al. Nonintrusive Load Monitoring based on Sequence-to-sequence Model With Attention Mechanism[J]. Proceedings of the CSEE".
super(Seq2Seq_Pytorch, self).__init__()
self.encoder = encoder
self.encoder.apply(initialize)
self.decoder = decoder
self.decoder.apply(initialize)
self.device = device
def forward(self, mains):
# mains = [batch_size, 1, mains_len]; the output sequence length app_len equals mains_len
batch_size, app_len = mains.size(0), mains.size(2)
# Notice that decoder.output_dim = encoder.input_dim
app_power_dim = self.decoder.power_dis_dim
# tensor to store decoder outputs
outputs = torch.zeros(batch_size, app_len, app_power_dim).to(self.device)
enc_output, s = self.encoder(mains)
# For-loop
for t in range(app_len):
# receive output tensor (predictions) and new hidden state, and place predictions in outputs
dec_output, s = self.decoder(enc_output, s)
outputs[:,t,:] = dec_output
return outputs
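# A minimal wiring sketch of the three modules above (the dimension values are placeholders,
# not values taken from the paper or the dataset):
#   attn = Attention(enc_hid_dim=128, dec_hid_dim=256)
#   enc = Encoder(power_dis_dim=3000)
#   dec = Decoder(power_dis_dim=3000, attention=attn)
#   model = Seq2Seq_Pytorch(enc, dec)
#   # mains must be a LongTensor of discretised power values, shape [batch, 1, seq_len];
#   # outputs = model(mains) has shape [batch, seq_len, power_dis_dim]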
def train(appliance_name, model, sequence_length, mains, appliance, epochs, batch_size, pretrain = False, checkpoint_interval = None, train_patience = 3):
# Model configuration
if USE_CUDA:
model = model.cuda()
if not pretrain:
model.apply(initialize)
# summary(model, (1, mains.shape[1]),dtypes = torch.long)
# split the train and validation set
train_mains,valid_mains,train_appliance,valid_appliance = train_test_split(mains, appliance, test_size=.2, random_state = random_seed)
# Create optimizer, loss function, and dataload
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
loss_fn = torch.nn.CrossEntropyLoss()
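# Note: CrossEntropyLoss expects [N, num_classes] logits and [N] integer class targets, hence the
# [batch, seq_len, num_classes] predictions are flattened at every loss call below.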
train_dataset = TensorDataset(torch.from_numpy(train_mains).long().permute(0,2,1), torch.from_numpy(train_appliance).float().permute(0,2,1))
valid_dataset = TensorDataset(torch.from_numpy(valid_mains).long().permute(0,2,1), torch.from_numpy(valid_appliance).float().permute(0,2,1))
train_loader = tud.DataLoader(train_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)
valid_loader = tud.DataLoader(valid_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)
writer = SummaryWriter(comment='train_visual')
patience, best_loss = 0, None
for epoch in range(epochs):
# Earlystopping
if(patience == train_patience):
print("val_loss did not improve after {} Epochs, thus Earlystopping is calling".format(train_patience))
break
# Train the model
st = time.time()
model.train()
for i, (batch_mains, batch_appliance) in enumerate(train_loader):
if USE_CUDA:
batch_mains = batch_mains.cuda()
batch_appliance = batch_appliance.cuda()
batch_pred = model(batch_mains)
loss = loss_fn(batch_pred.view(batch_size * sequence_length, -1), batch_appliance.view(-1).long())
model.zero_grad()
loss.backward()
optimizer.step()
ed = time.time()
# Evaluate the model
model.eval()
with torch.no_grad():
cnt, loss_sum = 0, 0
for i, (batch_mains, batch_appliance) in enumerate(valid_loader):
if USE_CUDA:
batch_mains = batch_mains.cuda()
batch_appliance = batch_appliance.cuda()
batch_pred = model(batch_mains)
loss = loss_fn(batch_pred.view(batch_size * sequence_length, -1), batch_appliance.view(-1).long())
loss_sum += loss
cnt += 1
final_loss = loss_sum / cnt
# Save best only
if best_loss is None or final_loss < best_loss:
best_loss = final_loss
patience = 0
net_state_dict = model.state_dict()
path_state_dict = "./"+appliance_name+"_seq2seq_best_state_dict.pt"
torch.save(net_state_dict, path_state_dict)
else:
patience = patience + 1
print("Epoch: {}, Valid_Loss: {}, Time consumption: {}.".format(epoch, final_loss, ed - st))
# For the visualization of training process
for name,param in model.named_parameters():
writer.add_histogram(name + '_grad', param.grad, epoch)
writer.add_histogram(name + '_data', param, epoch)
writer.add_scalars("MSELoss", {"Valid":final_loss}, epoch)
# Save checkpoint
if (checkpoint_interval != None) and ((epoch + 1) % checkpoint_interval == 0):
checkpoint = {"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"epoch": epoch}
path_checkpoint = "./"+appliance_name+"_seq2seq_checkpoint_{}_epoch.pt".format(epoch)
torch.save(checkpoint, path_checkpoint)
def test(model, test_mains, batch_size = 512):
# Model test
st = time.time()
model.eval()
# Create test dataset and dataloader
batch_size = test_mains.shape[0] if batch_size > test_mains.shape[0] else batch_size
test_dataset = TensorDataset(torch.from_numpy(test_mains).float().permute(0,2,1))
test_loader = tud.DataLoader(test_dataset, batch_size = batch_size, shuffle = False, num_workers = 0)
with torch.no_grad():
for i, batch_mains in enumerate(test_loader):
batch_pred = torch.argmax(model(batch_mains[0].long()).cpu(), dim = -1)
if i == 0:
res = batch_pred
else:
res = torch.cat((res, batch_pred), dim = 0)
ed = time.time()
print("Inference Time consumption: {}.".format(ed - st))
return res.numpy()
class Seq2Seq(Disaggregator):
def __init__(self, params):
self.MODEL_NAME = "Seq2Seq"
self.sequence_length = params.get('sequence_length',63)
self.n_epochs = params.get('n_epochs', 10)
self.batch_size = params.get('batch_size',512)
self.appliance_params = params.get('appliance_params',{})
self.mains_max = params.get('mains_max', 10000)
self.models = OrderedDict()
def partial_fit(self, train_main, train_appliances, pretrain = False, do_preprocessing=True,**load_kwargs):
# To preprocess the data and bring it to a valid shape
if do_preprocessing:
print ("Doing Preprocessing")
train_main, train_appliances, power_dis_dim = self.call_preprocessing(train_main, train_appliances,'train')
train_main = pd.concat(train_main, axis = 0).values
train_main = train_main.reshape((-1, self.sequence_length, 1))
new_train_appliances = []
for app_name, app_df in train_appliances:
app_df = | pd.concat(app_df, axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 14:24:56 2021
@author: <NAME>
International connected electricity sector
- NO3 connected to DK1
- SE3 connected to DK1
- SE4 connected to DK2
- DE connected to DK1
- DE connected to DK2
- NL connected to DK1
- Possible to add CO2 constraint
- To remove a country from the simulation, comment out its section. Be
aware that the plots may reference the removed country.
Reads data for the period 2017 downloaded from
data.open-power-system-data.org
Capacity factor is determined using installed capacity per production type
data from www.transparency.entsoe.eu
"""
#%% Import and define
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from pandas.tseries.offsets import DateOffset
def annuity(n,r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if r > 0:
return r/(1. - 1./(1.+r)**n)
else:
return 1/n
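# Quick sanity check of the annuity helper (illustrative values, consistent
# with the docstring example):
#     annuity(20, 0.05)        # ~= 0.0802
#     annuity(20, 0.05) * 20   # ~= 1.6
#     annuity(20, 0.0)         # falls back to 1/n = 0.05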
# Create network and snapshot
network = pypsa.Network()
hours_in_2017 = pd.date_range('2017-01-01T00:00Z','2017-12-31T23:00Z', freq='H')
network.set_snapshots(hours_in_2017)
# Load data: Demand and generators for 6 regions
df_elec = pd.read_csv('data/2017_entsoe.csv', sep=',', index_col=0) # in MWh
df_elec.index = pd.to_datetime(df_elec.index)  # change index to datetime
df_heat = pd.read_csv('data/heat_demand.csv', sep=';', index_col=0)
df_heat.index = | pd.to_datetime(df_heat.index) | pandas.to_datetime |
import gc
import os
import numpy as np
import pandas as pd
DEVELOP_MODE = True
if DEVELOP_MODE:
import resource
# 15.5 GB
memory_limit = 15.5 * 1024 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_RSS, (memory_limit, memory_limit))
else:
os.system("conda install -y cmake")
os.system("apt-get update")
os.system("unzip {}/optable.zip".format(os.path.dirname(__file__)))
os.system("cd optable_package/deb"
"&& dpkg -i pkg-config_0.29-4+b1_amd64.deb")
os.system("cd optable_package/deb"
"&& dpkg -i gcc-8-base_8.3.0-7_amd64.deb")
os.system("cd optable_package/deb"
"&& dpkg -i libstdc++6_8.3.0-7_amd64.deb")
os.system("cd optable_package/deb"
"&& dpkg -i libboost1.62-dev_1.62.0+dfsg-10+b1_amd64.deb")
os.system("cd optable_package/deb"
"&& dpkg -i libboost-math1.62.0_1.62.0+dfsg-10+b1_amd64.deb")
os.system("cd optable_package/deb"
"&& dpkg -i libboost-math1.62-dev_1.62.0+dfsg-10+b1_amd64.deb")
os.system("cd optable_package/deb"
"&& dpkg -i libeigen3-dev_3.3.7-1_all.deb")
# os.system("apt-get install -y libboost-dev")
# os.system("apt-get update")
# os.system("apt-get install -y libboost-math-dev")
# os.system("apt-get update")
# os.system("apt-get install -y python3.6-dev")
# os.system("apt-get update")
# os.system("apt-get install -y libeigen3-dev")
os.system("echo \"\" > /usr/share/dpkg/no-pie-link.specs")
os.system("echo \"\" > /usr/share/dpkg/no-pie-compile.specs")
os.system("pip3 install pybind11==2.3.0")
os.system("cd optable_package && pip3 install .")
os.system("pip3 install lightgbm==2.2.3")
# os.system("cd optable_package/PFLightGBM && mkdir build && cd build "
# "&& cmake .. && make -j4 "
# "&& cd ../python-package && python3 setup.py install")
os.system("pip3 install timeout-decorator==0.4.1")
os.system("pip3 install optable_package/optuna.zip")
os.system("pip3 install psutil==5.6.3")
import optable # NOQA
import timeout_decorator # NOQA
class Model:
def __init__(self, info):
self.info = info
self.Xs = None
self.y = None
self.timer = optable.Timer(
self.info["time_budget"], self.info["time_budget"])
def fit(self, Xs, y, time_remain):
self.Xs = Xs
self.y = y
self.timer.update_time_remain(time_remain)
self.timer.print_memory_usage()
def _predict(self, X_test, time_remain):
self.timer.update_time_remain(time_remain)
print("predict time remain:", time_remain)
self.timer.print_memory_usage()
# make dataset
self.timer.print("start making dataset")
train_size = len(self.Xs[optable.CONSTANT.MAIN_TABLE_NAME])
test_size = len(X_test)
dataset_maker = optable.DatasetMakerForAutoML()
dataset = dataset_maker.make(self.Xs, X_test, self.y, self.info)
self.timer.print("finished making dataset")
self.timer.print_memory_usage()
# synthesis
self.timer.print("start synthesis")
synthesizer = \
optable.Synthesizer(
dataset, timer=self.timer, priority_perturbation=0.0)
max_feature_size = min([int(4.5e8 / train_size),
int(8e8 / (train_size + test_size)),
int(np.log10(train_size) * 70 + 200)])
synthesizer.synthesis(
max_feature_size, self.timer.ratio_remain_time(0.3))
self.timer.print("finished synthesis")
self.timer.print_memory_usage()
dataset.clear_cache_of_table()
gc.collect()
optable._core.malloc_trim(0)
self.timer.print_memory_usage()
main_table = dataset.tables[
optable.CONSTANT.MAIN_TABLE_NAME]
main_table.confirm_new_data()
del dataset_maker, dataset
del synthesizer
optable._core.malloc_trim(0)
gc.collect()
self.timer.print_memory_usage()
max_cat_nunique = min([
int(np.power(self.y.values.sum(), 0.3)),
int(np.power((1 - self.y.values).sum(), 0.3))])
feature, cat_idx = main_table.get_lightgbm_df(max_cat_nunique)
main_sorted_time_index = main_table.sorted_time_index
# num_idx = [col_idx for col_idx in range(feature.shape[1])
# if col_idx not in cat_idx]
del main_table
optable._core.malloc_trim(0)
gc.collect()
self.timer.print_memory_usage()
train_feature = feature.iloc[:train_size]
test_feature = feature.iloc[train_size:]
train_sorted_time_index = \
main_sorted_time_index[main_sorted_time_index < train_size]
self.timer.print("train_feature shape {}".format(train_feature.shape))
self.timer.print("test_feature shape {}".format(test_feature.shape))
# feature selection
self.timer.print("start feature selection")
selector = optable.AdversarialAUCSelector(
train_size, threshold=0.3, max_ratio=0.5)
selected = selector.fit(
feature.values, self.y.values,
main_sorted_time_index, cat_idx)
del feature, selector
gc.collect()
self.timer.print("finished feature selection")
self.timer.print("{} / {} is selected".format(
selected.sum(), len(selected)))
drop_columns = train_feature.columns[np.logical_not(selected)]
self.timer.print("droped {}".format(drop_columns))
self.timer.print_memory_usage()
train_feature = train_feature.iloc[:, selected]
gc.collect()
test_feature = test_feature.iloc[:, selected]
gc.collect()
train_feature = train_feature.values
gc.collect()
train_feature = train_feature.astype(dtype=np.float32)
gc.collect()
test_feature = test_feature.values
gc.collect()
test_feature = test_feature.astype(dtype=np.float32)
gc.collect()
is_cat = np.zeros(len(selected)).astype(bool)
is_cat[cat_idx] = True
cat_idx = np.where(is_cat[selected])[0].tolist()
gc.collect()
self.timer.print_memory_usage()
# hyper parameter optimization
self.timer.print("start hyper parameter optimization")
"""
if np.abs(0.5 - self.y.values.mean()) > 0.49:
mode = "logloss"
else:
mode = "auc"
"""
mode = "auc"
ohp_searcher = optable.OptunaHyperParamsSearcher(
mode=mode, timer=self.timer)
# ohp_searcher = optable.TimeSplitHyperParamsSearcher(
# mode=mode, timer=self.timer)
params_list = ohp_searcher.fit(
train_feature, self.y.values,
train_sorted_time_index, cat_idx)
self.timer.print("finished hyper parameter optimization")
del ohp_searcher
del self.Xs, X_test
gc.collect()
self.timer.print_memory_usage()
self.timer.print("start model training and prediction")
lgb_model = optable.LightGBMCVAsWellAsPossible(
params_list, self.timer, n_split=10, max_model=10)
# lgb_model = optable.TimeSplitLightGBMCV(
# params_list, self.timer, n_split=5, max_model=10)
gc.collect()
self.timer.print_memory_usage()
predicted = lgb_model.fit_predict(
train_feature, self.y.values, test_feature, cat_idx)
self.timer.print("finished model training and prediction")
del train_feature, test_feature, lgb_model, self.y
gc.collect()
self.timer.print_memory_usage()
return | pd.Series(predicted) | pandas.Series |
import csv
import yfinance as yf
import pandas as pd
import time
def update_csv(tickers):
'''
Updates the stock data of all csv files
@params:
tickers(list): list of tickers whose closing prices are downloaded and written to per-ticker CSV files
@return:
None; each ticker's closing prices are written to a CSV file (DataFrame.to_csv returns None when a path is given)
'''
for ticker in tickers:
data = yf.download(tickers=ticker, group_by='Close', interval='15m', period='45d')
close = data['Close']
close_list = close.to_csv(r'C:\python_projects\AlgoTrader\daily_data\stock_data_{ticker}.csv'.format(ticker=ticker))
return close_list
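# Hedged usage sketch (the ticker symbols are placeholders, not from the
# original script):
#     update_csv(['AAPL', 'MSFT'])  # writes one stock_data_<ticker>.csv per ticker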
def update_single_csv(ticker):
'''
Updates the stock data of a single csv file
@params:
ticker(str): ticker of file to be updated
'''
stock = yf.Ticker(ticker)
data = stock.history(period='3mo', interval='1d')
hist = | pd.DataFrame(data=data, columns=['Close']) | pandas.DataFrame |
"""
=================
gprof_nn.data.sim
=================
This module defines a class to read the simulator output files (*.sim) that
contain the atmospheric profiles and corresponding simulated brightness
temperatures.
The module also provides functionality to extract the training data for the
GPROF-NN algorithm from these files.
"""
from concurrent import futures
from datetime import datetime
import logging
import os
from pathlib import Path
import subprocess
import tempfile
import numpy as np
import pandas as pd
from pykdtree.kdtree import KDTree
from rich.progress import Progress
import xarray as xr
import gprof_nn
from gprof_nn import sensors
from gprof_nn.definitions import N_LAYERS
from gprof_nn.definitions import (
ALL_TARGETS,
LEVELS,
DATABASE_MONTHS,
PROFILE_NAMES,
)
from gprof_nn.coordinates import latlon_to_ecef
from gprof_nn.data.l1c import L1CFile
from gprof_nn.data.mrms import MRMSMatchFile
from gprof_nn.data.preprocessor import run_preprocessor
from gprof_nn.data.utils import compressed_pixel_range, N_PIXELS_CENTER
from gprof_nn.logging import get_console
from gprof_nn.utils import CONUS
LOGGER = logging.getLogger(__name__)
###############################################################################
# Data types
###############################################################################
N_CHANS_MAX = 15
GENERIC_HEADER = np.dtype(
[
("satellite_code", "a5"),
("sensor", "a5"),
]
)
###############################################################################
# GPROF GMI Simulation files
###############################################################################
class SimFile:
"""
Interface class to read GPROF .sim files.
The main purpose of this class is to provide an interface to read
.sim files and convert them to 'xarray.Dataset' objects via the
'to_xarray_dataset' method.
Attributes:
path: The path of the file
granule: The GPM CO granule number to which this file corresponds.
date: Date object specifying the day of the corresponding GPM orbit.
sensor: Sensor object representing the sensor corresponding to
this file.
header: Numpy structured array containing the header data of the
file.
data: Numpy structured array containing raw data of the file.
"""
@classmethod
def find_files(cls, path, sensor=sensors.GMI, day=None):
"""
Find all files that match the standard filename pattern for
sim files for the given sensor.
Args:
path: Root of the directory tree in which to look for .sim
files.
sensor: The sensor for which to find .sim files.
day: If given search is restricted to the given day within
each month.
Return:
A list containing the found .sim files.
"""
if day is None:
pattern = sensor.sim_file_pattern.format(day="??")
else:
pattern = sensor.sim_file_pattern.format(day=f"{day:02}")
path = Path(path)
files = list(path.glob("**/????/" + pattern))
if not files:
files = list(path.glob("**/" + pattern))
return files
def __init__(self, path):
"""
Open a .sim file.
Args:
path: Path to the .sim file to open.
"""
self.path = path
parts = str(path).split(".")
self.granule = int(parts[-2])
year = int(parts[-3][:4])
month = int(parts[-3][4:6])
day = int(parts[-3][6:])
self.date = datetime(year, month, day)
header = np.fromfile(self.path, GENERIC_HEADER, count=1)
sensor = header["sensor"][0].decode().strip()
try:
sensor = getattr(sensors, sensor.upper())
except AttributeError:
raise ValueError(f"The sensor {sensor} isn't currently supported.")
self.sensor = sensor
self.header = np.fromfile(self.path, self.sensor.sim_file_header, count=1)
offset = self.sensor.sim_file_header.itemsize
self.data = np.fromfile(self.path, sensor.sim_file_record, offset=offset)
def match_targets(self, input_data, targets=None):
"""
Match retrieval targets from .sim file to points in
xarray dataset.
Args:
input_data: xarray dataset containing the input data from
the preprocessor.
targets: List of retrieval target variables to extract from
the sim file.
Return:
The input dataset but with the requested retrieval targets added.
"""
if targets is None:
targets = ALL_TARGETS
path_variables = [t for t in targets if "path" in t]
for var in path_variables:
profile_variable = var.replace("path", "content").replace("ice", "snow")
if profile_variable not in targets:
targets.append(profile_variable)
targets = [t for t in targets if "path" not in t]
n_scans = input_data.scans.size
n_pixels = 221
w_c = 40
i_c = 110
ix_start = i_c - w_c // 2
ix_end = i_c + 1 + w_c // 2
i_left, i_right = compressed_pixel_range()
lats_1c = input_data["latitude"][:, ix_start:ix_end].data.reshape(-1, 1)
lons_1c = input_data["longitude"][:, ix_start:ix_end].data.reshape(-1, 1)
coords_1c = latlon_to_ecef(lons_1c, lats_1c)
coords_1c = np.concatenate(coords_1c, axis=1)
lats = self.data["latitude"].reshape(-1, 1)
lons = self.data["longitude"].reshape(-1, 1)
coords_sim = latlon_to_ecef(lons, lats)
coords_sim = np.concatenate(coords_sim, 1)
# Determine indices of matching L1C observations.
kdtree = KDTree(coords_1c)
dists, indices = kdtree.query(coords_sim)
n_angles = 0
if self.sensor.n_angles > 1:
n_angles = self.sensor.n_angles
n_chans = self.sensor.n_chans
if "tbs_simulated" in self.data.dtype.fields:
if n_angles > 0:
shape = (n_scans, w_c + 1, n_angles, n_chans)
full_shape = (n_scans, n_pixels, n_angles, n_chans)
matched = np.zeros((n_scans * (w_c + 1), n_angles, n_chans))
dims = ("scans", "pixels_center", "angles", "channels")
else:
shape = (n_scans, w_c + 1, n_chans)
full_shape = (n_scans, n_pixels, n_chans)
matched = np.zeros((n_scans * (w_c + 1), n_chans))
dims = ("scans", "pixels_center", "channels")
matched[:] = np.nan
assert np.all(indices[dists < 10e3] < matched.shape[0])
indices = np.clip(indices, 0, matched.shape[0] - 1)
tbs = self.data["tbs_simulated"]
if isinstance(self.sensor, sensors.ConstellationScanner):
tbs = tbs[..., self.sensor.gmi_channels]
# tbs = tbs.reshape((-1,) + shape[2:])
matched[indices, ...] = tbs
matched[indices, ...][dists > 10e3] = np.nan
matched = matched.reshape(shape)
matched_full = np.zeros(full_shape, dtype=np.float32)
matched_full[:] = np.nan
matched_full[:, ix_start:ix_end] = matched
input_data["simulated_brightness_temperatures"] = (
dims,
matched_full[:, i_left:i_right],
)
if "tbs_bias" in self.data.dtype.fields:
shape = (n_scans, w_c + 1, n_chans)
full_shape = (n_scans, n_pixels, n_chans)
matched = np.zeros((n_scans * (w_c + 1), n_chans))
matched[:] = np.nan
biases = self.data["tbs_bias"]
if isinstance(self.sensor, sensors.ConstellationScanner):
biases = biases[..., self.sensor.gmi_channels]
matched[indices, ...] = biases
matched[indices, ...][dists > 10e3] = np.nan
matched = matched.reshape(shape)
matched_full = np.zeros(full_shape, dtype=np.float32)
matched_full[:] = np.nan
matched_full[:, ix_start:ix_end] = matched
input_data["brightness_temperature_biases"] = (
("scans", "pixels_center", "channels"),
matched_full[:, i_left:i_right],
)
# Extract matching data
for target in targets:
if target in PROFILE_NAMES:
n = n_scans * (w_c + 1)
shape = (n_scans, w_c + 1, 28)
full_shape = (n_scans, n_pixels, 28)
matched = np.zeros((n, 28), dtype=np.float32)
else:
n = n_scans * (w_c + 1)
if n_angles > 0:
shape = (n_scans, w_c + 1, n_angles)
full_shape = (n_scans, n_pixels, n_angles)
matched = np.zeros((n, n_angles), dtype=np.float32)
else:
shape = (n_scans, w_c + 1)
full_shape = (n_scans, n_pixels)
matched = np.zeros(n, dtype=np.float32)
matched[:] = np.nan
matched[indices, ...] = self.data[target]
matched[indices, ...][dists > 5e3] = np.nan
matched = matched.reshape(shape)
matched_full = np.zeros(full_shape, dtype=np.float32)
matched_full[:] = np.nan
matched_full[:, ix_start:ix_end] = matched
if target in PROFILE_NAMES:
data = matched_full[:, i_left:i_right]
input_data[target] = (
("scans", "pixels_center", "levels"),
data.astype(np.float32),
)
if "content" in target:
path = np.trapz(data, x=LEVELS, axis=-1) * 1e-3
path_name = target.replace("content", "path").replace("snow", "ice")
input_data[path_name] = (("scans", "pixels_center"), path)
else:
if target in ["surface_precip", "convective_precip"]:
dims = ("scans", "pixels")
if n_angles > 0:
dims = dims + ("angles",)
input_data[target] = (dims, matched_full.astype(np.float32))
else:
input_data[target] = (
("scans", "pixels_center"),
matched_full[:, i_left:i_right].astype(np.float32),
)
return input_data
def to_xarray_dataset(self):
"""
Return data in sim file as 'xarray.Dataset'.
"""
results = {}
dim_dict = {
self.sensor.n_chans: "channels",
N_LAYERS: "layers",
}
if isinstance(self.sensor, sensors.CrossTrackScanner):
dim_dict[self.sensor.n_angles] = "angles"
record_type = self.sensor.sim_file_record
for key, _, *shape in record_type.descr:
data = self.data[key]
if key in [
"emissivity",
"tbs_observed",
"tbs_simulated",
"tbs_bias",
"d_tbs",
]:
if isinstance(self.sensor, sensors.ConstellationScanner):
data = data[..., self.sensor.gmi_channels]
dims = ("samples",)
if len(data.shape) > 1:
dims = dims + tuple([dim_dict[s] for s in data.shape[1:]])
results[key] = dims, self.data[key]
dataset = xr.Dataset(results)
return dataset
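# Hedged usage sketch (the file name below is a made-up placeholder; real
# names follow the sensor-specific sim_file_pattern):
#     sim_file = SimFile("GMI.dbsatTb.20181031.027510.sim")
#     print(sim_file.sensor, sim_file.granule, sim_file.date)
#     dataset = sim_file.to_xarray_dataset()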
ENHANCEMENT_FACTORS = {
"ERA5": {
(17, 0): 1.35683,
(17, 1): 2.05213,
(17, 2): 1.62242,
(17, 3): 1.87049,
(18, 0): 3.91369,
},
"GANAL": {
(17, 0): 1.58177,
(17, 1): 1.81539,
(18, 0): 3.91369,
},
}
def apply_orographic_enhancement(data, kind="ERA5"):
"""
Applies orographic enhancement factors to 'surface_precip' and
'convective_precip' targets.
Args:
data: xarray.Dataset containing variables surface_precip,
convective_precip, surface_type and airmass_type.
kind: "ERA5" or "GANAL" depending on the source of ancillary data.
Returns:
None; Correction is applied in place.
"""
kind = kind.upper()
if kind not in ["ERA5", "GANAL"]:
raise ValueError("The kind argument to must be 'ERA5' or 'GANAL'.")
surface_types = data["surface_type"].data
airmass_types = data["airmass_type"].data
surface_precip = data["surface_precip"].data
convective_precip = data["convective_precip"].data
enh = np.ones(surface_precip.shape, dtype=np.float32)
factors = ENHANCEMENT_FACTORS[kind]
for t_s in [17, 18]:
for t_a in range(4):
key = (t_s, t_a)
if key not in factors:
continue
indices = (surface_types == t_s) * (airmass_types == t_a)
enh[indices] = factors[key]
surface_precip *= enh
convective_precip *= enh
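# Hedged usage sketch: the correction is applied in place on a dataset that
# already carries surface_precip, convective_precip, surface_type and
# airmass_type (the variable name is an illustrative assumption):
#     apply_orographic_enhancement(training_data, kind="ERA5")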
###############################################################################
# Helper functions
###############################################################################
def _extract_scenes(data):
"""
Extract 221 x 221 pixel wide scenes from dataset where
ground truth surface precipitation rain rates are
available.
Args:
data: xarray.Dataset containing the data from the preprocessor together
with the matched surface precipitation from the .sim file.
Return:
New xarray.Dataset containing 221 x 221 scenes of input data
and corresponding surface precipitation.
"""
n = 221
surface_precip = data["surface_precip"].data
if np.all(np.isnan(surface_precip)):
return None
i_start = 0
i_end = data.scans.size
scenes = []
while i_start + n < i_end:
subscene = data[{"scans": slice(i_start, i_start + n)}]
surface_precip = subscene["surface_precip"].data
if np.isfinite(surface_precip).sum() > 100:
scenes.append(subscene)
i_start += n
else:
i_start += n // 2
if scenes:
return xr.concat(scenes, "samples")
return None
def _find_l1c_file(path, sim_file):
"""
Find GPROF GMI L1C file corresponding to .sim file.
Args:
path: Path pointing to the root of the folder tree containing the
L1C files.
sim_files: SimFile for which to find the corresponding L1C
file.
Return:
The corresponding L1C file.
"""
year = sim_file.date.year - 2000
month = sim_file.date.month
day = sim_file.date.day
path = Path(path) / f"{year:02}{month:02}" / f"{year:02}{month:02}{day:02}"
files = path.glob(f"1C-R*{sim_file.granule}*.HDF5")
return next(iter(files))
def _load_era5_data(start_time, end_time, base_directory):
"""
Loads ERA5 data matching the start and end time of a L1C
file.
Args:
start_time: First scan time from L1C file.
end_time: Last scan time from L1C file.
base_directory: Root of the directory tree containing the
ERA5 files.
"""
start_time = pd.to_datetime(start_time)
end_time = | pd.to_datetime(end_time) | pandas.to_datetime |
"""
Created on Mon Apr 12 09:17:14 2021
Developed for UIF to more easily handle the growing number of alumni they have,
and to track interactions with said alumni.
Final Project for CCAC DAT-281
@author: BKG
"""
import os
import sys
import sqlite3
from sqlite3 import Error
import pandas as pd
import PySimpleGUI as sg
def main():
"""
The main menu.
Presents the user with a GUI and five buttons to choose from.
Based on which button the user clicks, executes other functions or closes the program.
Returns
-------
None.
"""
os.chdir(os.path.dirname(sys.argv[0]))
sg.theme('DarkBlue3')
layout = [[sg.Text('Please select an action that you would like to perform:',
size=(25,3),
font=('Arial', 15))],
[sg.Button('Import new alumni to the database',
key='alum',
size=(30,1))],
[sg.Button('Import new interaction with alumni',
key='interaction',
size=(30,1))],
[sg.Text('_' * 100, size=(32, 1))],
[sg.Button('Export list of alumni with ID numbers',
key='export_ID',
size=(30,1))],
[sg.Button('Export list of next alumni to contact',
key='contact',
size=(30,1))],
[sg.Text('_' * 100, size=(32, 1))],
[sg.Button('Close the program',
key='close',
size=(30,1))]]
window = sg.Window('UIF: Alumni Database', layout)
while True:
event = window.read()
if event[0] == 'alum':
window.close()
main_alum()
elif event[0] == 'interaction':
window.close()
main_interaction()
elif event[0] == 'export_ID':
window.close()
main_export_id()
elif event[0] == 'contact':
window.close()
main_contact()
elif event[0] in ('close', sg.WIN_CLOSED):
break
window.close()
def main_alum():
location = select_file()
if location is not None:
new_alumni_gui(location)
else:
main()
def main_interaction():
location = select_file()
if location is not None:
new_interaction_gui(location)
else:
main()
def main_export_id():
location = select_folder()
if location is not None:
export_alumni_name_list(location)
else:
main()
def main_contact():
location = select_folder()
if location is not None:
export_alumni_contact_list(location)
else:
main()
def select_file():
layout = [[sg.Text('Folder Location')],
[sg.Input(), sg.FileBrowse()],
[sg.OK(), sg.Cancel()] ]
window = sg.Window('UIF: Alumni Database', layout)
values = window.read()
window.close()
if values[1][0] != '':
return values[1][0]
return None
def select_folder():
layout = [[sg.Text('Folder Location')],
[sg.Input(), sg.FolderBrowse()],
[sg.OK(), sg.Cancel()] ]
window = sg.Window('UIF: Alumni Database', layout)
values = window.read()
window.close()
if values[1][0] != '':
return values[1][0]
return None
def all_good():
layout = [[sg.Text('Everything completed without errors.',
font=('Arial', 15))],
[sg.Button('Exit the program', key='close')]]
window = sg.Window('UIF: Alumni Database', layout)
while True:
event = window.read()
if event[0] in ('close', sg.WIN_CLOSED):
break
window.close()
def export_alumni_name_list(path):
"""
Opens a connection to the database.
Queries the database.
Output is put into a dataframe.
Dataframe is written to .csv file.
Returns
-------
None.
"""
connection = _db_connection()
query = ''' SELECT ID_number, first_name, last_name,
graduation_year, CORE_student
FROM Basic_Info
ORDER BY last_name ASC
'''
output = pd.read_sql(query, con=connection)
connection.close()
col_names = ['ID Number',
'First Name',
'Last Name',
'Graduation Year',
'CORE?']
output.columns = col_names
file_name = 'Master Alumni List.csv'
# path = select_folder()
os.chdir(path)
output.to_csv(file_name, index=False, encoding='utf-8')
all_good()
def export_alumni_contact_list(path):
query_read = '''SELECT c.ID_number, c.first_name, c.last_name,
c.CORE_student, c.last_date, b.phone_num, b.email
FROM Last_Contact c
INNER JOIN Basic_Info b
ON c.ID_number = b.ID_number
WHERE last_date < DATE('now', '-90 days')
ORDER BY c.CORE_student DESC, c.last_date ASC
'''
connection = _db_connection()
contact = pd.read_sql(query_read, con=connection)
connection.close()
col_names = ['ID Number',
'First Name',
'Last Name',
'CORE?',
'Last Contact Date',
'Phone Number',
'Email']
contact.columns = col_names
file_name = 'Alumni to Contact.csv'
# path = select_folder()
os.chdir(path)
contact.to_csv(file_name, index=False, encoding='utf-8')
all_good()
def new_alumni_gui(location):
alumni_display = pd.read_csv(location)
display_cols = ['Last Name',
'First Name',
'Graduation Year']
alumni_display = alumni_display[display_cols]
data = alumni_display.values.tolist()
header_list = alumni_display.columns.tolist()
layout = [[sg.Text('The following alumni will be added to the database:\n')],
[sg.Table(values=data,
headings=header_list,
display_row_numbers=True,
auto_size_columns=True,
num_rows=min(25, len(data)))],
[sg.Button('Confirm', key='import')],
[sg.Button('Cancel', key='cancel')],
[sg.Button('Main Menu', key='main')]]
window = sg.Window('UIF: Alumni Database', layout)
while True:
event = window.read()
if event[0] == 'import':
window.close()
import_alumni_p1(location)
all_good()
elif event[0] == 'main':
window.close()
main()
elif event[0] == 'cancel':
window.close()
main()
elif event[0] == sg.WIN_CLOSED:
break
window.close()
def new_interaction_gui(location):
interaction = pd.read_csv(location)
col_names = ['ID_number',
'first_name',
'last_name',
'contact_date',
'status',
'need',
'notes']
interaction.columns = col_names
display_cols = ['last_name',
'first_name',
'contact_date',
'notes']
interaction['contact_date'] = pd.to_datetime(interaction['contact_date']).dt.strftime('%Y-%m-%d')
display = interaction[display_cols]
data = display.values.tolist()
header_list = display.columns.tolist()
layout = [[sg.Text('The following alumni interactions will be added'+
'to the database: \n')],
[sg.Table(values=data,
headings=header_list,
display_row_numbers=True,
auto_size_columns=True,
num_rows=min(25, len(data)))],
[sg.Button('Confirm', key='import')],
[sg.Button('Cancel - Do NOT Add', key='cancel')],
[sg.Button('Main Menu', key='main')]]
window = sg.Window('UIF: Alumni Database', layout)
while True:
event = window.read()
if event[0] == 'import':
window.close()
import_new_interaction(interaction)
update_last_contact(interaction)
elif event[0] == 'main':
window.close()
main()
elif event[0] in ('cancel', sg.WIN_CLOSED):
break
window.close()
all_good()
def update_last_contact(interaction):
query_read = '''SELECT ID_number, last_date
FROM Last_Contact
WHERE ID_number = :id
'''
query_write = '''UPDATE Last_Contact
SET last_date = ?
WHERE ID_number = ?
'''
connection = _db_connection()
cursor = connection.cursor()
for i in interaction.index:
id_num = int(interaction.loc[i, 'ID_number'])
date_df = pd.read_sql(query_read,
con=connection,
params={'id':id_num})
if date_df.iloc[0]['last_date'] < interaction.iloc[i]['contact_date']:
cursor.execute(query_write,
(interaction.iloc[i]['contact_date'], id_num))
connection.commit()
else:
print(interaction.iloc[i]['contact_date'], 'is too old..')
connection.close()
def import_new_interaction(interaction):
interaction['contact_date'] = | pd.to_datetime(interaction['contact_date']) | pandas.to_datetime |
from genericpath import exists
import os
import pandas as pd
from PIL import Image
import numpy as np
from tqdm import tqdm
import librosa
def sample_count(input_path,save_path):
info = []
for item in os.scandir(input_path):
info_item = [item.name,len(os.listdir(item.path))]
info.append(info_item)
info.sort(key=lambda x:eval(x[0]))
col = ['class','sample_num']
csv_file = pd.DataFrame(columns=col,data=info)
csv_file.to_csv(save_path,index=False)
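# Hedged usage sketch (paths are placeholders):
#     sample_count('./dataset/train', './train_class_counts.csv')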
def size_count(input_path,save_path):
info = []
for subdir in os.scandir(input_path):
info_item = [subdir.name]
size_list = []
for item in os.scandir(subdir.path):
img = Image.open(item.path)
size_list.append(img.size)
info_item.append(list(set(size_list)))
info.append(info_item)
info.sort(key=lambda x:eval(x[0]))
col = ['class','size_list']
csv_file = | pd.DataFrame(columns=col,data=info) | pandas.DataFrame |
from django.test import TestCase
from transform_layer.services.data_service import DataService, KEY_SERVICE, KEY_MEMBER, KEY_FAMILY
from transform_layer.calculations import CalculationDispatcher
from django.db import connections
import pandas
from pandas.testing import assert_frame_equal, assert_series_equal
import unittest
class HasDataTestCase(unittest.TestCase):
def test_has_data_empty_dataframe(self):
data = pandas.DataFrame()
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_nonempty_dataframe(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = pandas.DataFrame(d1)
self.assertTrue(CalculationDispatcher.has_data(data))
def test_has_data_no_services(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(),
KEY_MEMBER: pandas.DataFrame(d1),
KEY_FAMILY: pandas.DataFrame(d1)
}
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_no_members(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(d1),
KEY_MEMBER: | pandas.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
move_df = MoveDataFrame(list_move)
list_events = [
[39.984094, 116.319236, 1, Timestamp('2008-10-24 01:57:57'),
'show do tropykalia'],
[39.991013, 116.326384, 2, Timestamp('2008-10-24 00:22:01'),
'evento da prefeitura'],
[40.01, 116.312615, 3, Timestamp('2008-10-25 00:21:01'),
'show do seu joao'],
[40.013821, 116.306531, 4, Timestamp('2008-10-26 00:22:01'),
'missa']
]
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-23T03:53:05.000000000', '2008-10-23T08:37:26.000000000',
'2008-10-23T08:50:16.000000000', '2008-10-23T09:03:06.000000000',
'2008-10-23T09:58:33.000000000', '2008-10-23T21:50:45.000000000',
'2008-10-23T22:02:14.000000000', '2008-10-23T22:22:01.000000000',
'2008-10-23T23:57:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T07:53:05.000000000', '2008-10-23T12:37:26.000000000',
'2008-10-23T12:50:16.000000000', '2008-10-23T13:03:06.000000000',
'2008-10-23T13:58:33.000000000', '2008-10-24T01:50:45.000000000',
'2008-10-24T02:02:14.000000000', '2008-10-24T02:22:01.000000000',
'2008-10-24T03:57:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(9, None, dtype=np.ndarray)
type_expected = np.full(9, None, dtype=np.ndarray)
id_expected = np.full(9, None, dtype=np.ndarray)
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window_and_creates_event_id_type_all(
move_df, pois, 7200, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_join_with_pois():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 'distrito_pol_1'],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 6, 128.24869775642176, 'adocao_de_animais'],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 5, 663.0104596559174, 'rinha_de_galo_world_cup'],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 4, 286.3387434682031, 'forro_tropykalia'],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 4,
0.9311014399622559, 'forro_tropykalia'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 3,
211.06912863495492, 'supermercado_aroldo'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'policia_federal'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 6,
792.7526066105717, 'adocao_de_animais'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 7,
270.7018856738821, 'dia_do_municipio']
],
columns=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, ID_POI, DIST_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_pois_by_category():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 3, 2935.3102772960456, 7, 814.8193850933852, 5,
2672.393533820207, 6, 675.1730686007362],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 1, 637.6902157810676, 3, 3072.6963790707114, 7,
1385.3649632111096, 5, 2727.1360691122813, 6, 128.24869775642176],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 2, 1385.0871812075436, 3, 1094.8606633486436, 4,
1762.0085654338782, 5, 663.0104596559174, 6, 1965.702358742657],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 2, 3225.288830967221, 3, 810.5429984051405, 4,
286.3387434682031, 5, 1243.8915481769327, 6, 3768.0652637796675],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 3, 669.9731550451877, 4, 0.9311014399622559,
5, 1145.172578151837, 6, 3574.252994707609],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 3, 211.06912863495492, 4, 857.4175399672413,
5, 289.35378153627966, 6, 2855.1657930463994],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 3, 2179.5701631051966, 7,
2003.4096341742952, 5, 1784.3132149978549, 6, 870.5252810680124],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 3, 3702.2394204188754, 7, 1287.7039084016499,
5, 3376.4438614084356, 6, 792.7526066105717],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 3, 3154.296880053552, 7, 270.7018856738821, 5,
2997.898227057909, 6, 1443.9247752786023]
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, 'id_policia', 'dist_policia',
'id_comercio', 'dist_comercio', 'id_show', 'dist_show', 'id_risca-faca',
'dist_risca-faca', 'id_evento', 'dist_evento'
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois_by_category(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_events():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1,
'', inf, ''],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, '', inf, ''],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, '', inf, ''],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, '', inf, ''],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 'evento da prefeitura'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 'evento da prefeitura'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'evento da prefeitura'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 'show do tropykalia'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 'show do tropykalia']
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, EVENT_ID, DIST_EVENT, EVENT_TYPE
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_events(move_df, pois, time_window=45000, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_event_by_dist_and_time():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, | Timestamp('2008-10-24 00:02:14') | pandas.Timestamp |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.core.base import DataError
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, 8, 2, 6],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
],
],
)
@pytest.mark.parametrize(
"ties_method,ascending,pct,exp",
[
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,exp",
[
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
np.nan,
pd.Timestamp("2018-01-08"),
| pd.Timestamp("2018-01-02") | pandas.Timestamp |
"""
Train a baseline model using a pytorch implementation of YOLO2
Ignore:
rsync -avzP ~/data/./viame-challenge-2018/phase0-imagery hermes:data/viame-challenge-2018/
rsync -avzP ~/data/viame-challenge-2018/phase0-imagery/./mbari_seq0 hermes:data/viame-challenge-2018/phase0-imagery
rsync -avzP ~/data/viame-challenge-2018/phase0-imagery/./mouss_seq1 hermes:data/viame-challenge-2018/phase0-imagery
rsync -avzP ~/data/viame-challenge-2018/phase0-imagery/./mouss_seq1 hermes:data/viame-challenge-2018/phase0-imagery
tar -xvzf /data/jowens/noaa
tar -xvzf /data/jowens/noaa/phase1-imagery.tar.gz -C /data/projects/noaa
tar -xvzf /data/jowens/noaa/phase1-annotations.tar.gz -C /data/projects/noaa
"""
import os
import torch
import cv2
import ubelt as ub
import pandas as pd
import numpy as np
import netharn as nh
from netharn.models.yolo2 import light_region_loss
from netharn.models.yolo2 import light_yolo
from fishnet.data import load_coco_datasets, YoloCocoDataset
class YoloHarn(nh.FitHarn):
def __init__(harn, **kw):
super().__init__(**kw)
harn.batch_confusions = []
harn.aps = {}
def prepare_batch(harn, raw_batch):
"""
ensure batch is in a standardized structure
"""
batch_inputs, batch_labels = raw_batch
inputs = harn.xpu.variable(batch_inputs)
labels = {k: harn.xpu.variable(d) for k, d in batch_labels.items()}
batch = (inputs, labels)
return batch
def run_batch(harn, batch):
"""
Connect data -> network -> loss
Args:
batch: item returned by the loader
Example:
>>> harn = setup_harness(bsize=2)
>>> harn.initialize()
>>> batch = harn._demo_batch(0, 'vali')
>>> #weights_fpath = light_yolo.demo_voc_weights()
>>> #state_dict = harn.xpu.load(weights_fpath)['weights']
>>> #harn.model.module.load_state_dict(state_dict)
>>> outputs, loss = harn.run_batch(batch)
"""
# Compute how many images have been seen before
bsize = harn.loaders['train'].batch_sampler.batch_size
nitems = len(harn.datasets['train'])
bx = harn.bxs['train']
n_seen = (bx * bsize) + (nitems * harn.epoch)
inputs, labels = batch
outputs = harn.model(inputs)
target = labels['targets']
gt_weights = labels['gt_weights']
loss = harn.criterion(outputs, target, seen=n_seen,
gt_weights=gt_weights)
return outputs, loss
def on_batch(harn, batch, outputs, loss):
"""
custom callback
Example:
>>> harn = setup_harness(bsize=1)
>>> harn.initialize()
>>> batch = harn._demo_batch(0, 'train')
>>> #weights_fpath = light_yolo.demo_voc_weights()
>>> #state_dict = harn.xpu.load(weights_fpath)['weights']
>>> #harn.model.module.load_state_dict(state_dict)
>>> outputs, loss = harn.run_batch(batch)
>>> harn.on_batch(batch, outputs, loss)
>>> # xdoc: +REQUIRES(--show)
>>> postout = harn.model.module.postprocess(outputs)
>>> from netharn.util import mplutil
>>> mplutil.qtensure() # xdoc: +SKIP
>>> harn.visualize_prediction(batch, outputs, postout, idx=0,
>>> thresh=0.2)
>>> mplutil.show_if_requested()
"""
if harn.current_tag != 'train':
# Dont worry about computing mAP on the training set for now
inputs, labels = batch
inp_size = np.array(inputs.shape[-2:][::-1])
try:
postout = harn.model.module.postprocess(outputs)
except Exception as ex:
harn.error('\n\n\n')
harn.error('ERROR: FAILED TO POSTPROCESS OUTPUTS')
harn.error('DETAILS: {!r}'.format(ex))
raise
for y in harn._measure_confusion(postout, labels, inp_size):
harn.batch_confusions.append(y)
metrics_dict = ub.odict()
metrics_dict['L_bbox'] = float(harn.criterion.loss_coord)
metrics_dict['L_iou'] = float(harn.criterion.loss_conf)
metrics_dict['L_cls'] = float(harn.criterion.loss_cls)
for k, v in metrics_dict.items():
if not np.isfinite(v):
raise ValueError('{}={} is not finite'.format(k, v))
return metrics_dict
def on_epoch(harn):
"""
custom callback
Example:
>>> harn = setup_harness(bsize=4)
>>> harn.initialize()
>>> batch = harn._demo_batch(0, 'vali')
>>> #weights_fpath = light_yolo.demo_voc_weights()
>>> #state_dict = harn.xpu.load(weights_fpath)['weights']
>>> #harn.model.module.load_state_dict(state_dict)
>>> outputs, loss = harn.run_batch(batch)
>>> # run a few batches
>>> harn.on_batch(batch, outputs, loss)
>>> harn.on_batch(batch, outputs, loss)
>>> harn.on_batch(batch, outputs, loss)
>>> # then finish the epoch
>>> harn.on_epoch()
"""
tag = harn.current_tag
if tag == 'vali':
harn._dump_chosen_validation_data()
if harn.batch_confusions:
y = pd.concat([ | pd.DataFrame(y) | pandas.DataFrame |
import pandas as pd
import numpy as np
import json
import string
from bids.grabbids import BIDSLayout
import nibabel as nib
# --------------------------------------------------------------------- PART 1 GET Metadata-------------------------------------
'''
Load the BIDS data grabber,
read the number of VOLUMES from the metadata for each subject,
create a subject_id -> volumes dict,
and select subjects whose volume count is at least the threshold (`vols`).
'''
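# In outline, the queries performed below (a sketch using the same pybids calls as
# the loop that follows; the subject ID shown is hypothetical):
#   layout = BIDSLayout(data_directory)
#   files = layout.get(subject='28675', type='bold', session=1, run=1, extensions=['nii', 'nii.gz'])
#   volumes = layout.get_metadata(path=files[0].filename)['NumberofMeasurements']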
data_directory = '/mnt/project1/home1/varunk/data/ABIDE2RawDataBIDS'
layout = BIDSLayout(data_directory)
subjects = layout.get_subjects()
subid_vol_dict = {}
subject_list = []
session = [1,2]
run = 1
bugs_abide2 = ['28093', '28093', '28681', '28682', '28683', '28687', '28711', '28712', '28713', '28741', '28745', '28751', '28755', '28756', '28757', '28758',
'28759', '28761', '28762','28763', '28764','28765','28766','28767','28768','28769','28770','28771','28772','28773','28774','28775','28776','28777','28778','28779',
'28780','28781','28782','28783', '29622'
]
for subject_id in subjects:
func_file_path = [f.filename for f in layout.get(subject=subject_id, type='bold',session = session[0], run=run, extensions=['nii', 'nii.gz'])]
if len(func_file_path) == 0:
func_file_path = [f.filename for f in layout.get(subject=subject_id, type='bold',session = session[1], run=run, extensions=['nii', 'nii.gz'])]
if len(func_file_path) == 0:
if subject_id not in bugs_abide2:
print('No Func file: %s'%subject_id)
continue
# print(func_file_path)
metadata = layout.get_metadata(path=func_file_path[0])
volumes = metadata['NumberofMeasurements']
try:
volumes = int(volumes)
except ValueError:
# Mixed Volumes site
brain_img = nib.load(func_file_path[0])
volumes = brain_img.shape[-1]
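    # NOTE: `vols` (the minimum-volume threshold mentioned in the docstring) is assumed
    # to be defined earlier in the full script; it is not set anywhere in this excerpt.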
if volumes >= vols:
subid_vol_dict[subject_id] = volumes
subject_list.append(subject_id)
print('Subject: %s Volumes: %s'%(subject_id, volumes))
import pdb; pdb.set_trace()
# df = pd.read_csv('/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv') # , index_col='SUB_ID'
phenotype_file_path = '/mnt/project1/home1/varunk/data/ABIDE2RawDataBIDS/ABIDEII_Composite_Phenotypic (copy).csv'
scan_params_file_path = '/mnt/project1/home1/varunk/data/ABIDE2RawDataBIDS/scan_params_file.txt'
df = pd.read_csv(phenotype_file_path, encoding = "ISO-8859-1")
df = df.sort_values(['SUB_ID'])
with open(scan_params_file_path, 'r') as f:
scan_param_paths = f.read().split('\n')[0:-1]
print(scan_param_paths)
SITES = np.unique(df.as_matrix(['SITE_ID']).squeeze())
data_frame = | pd.DataFrame({
'SITE_NAME': [] ,
'TR': [],
'VOLUMES': [],
'xdim_mm': [],
'ydim_mm': [],
'zdim_mm': [],
'xdim_voxels': [],
'ydim_voxels': [],
'zdim_voxels': [],
'NUM_AUT_DSM_V': [] ,
'NUM_AUT_MALE_DSM_V': [] ,
'NUM_AUT_FEMALE_DSM_V': [],
'NUM_AUT_AGE_lte12_DSM_V' : [],
'NUM_AUT_AGE_12_18_DSM_V' : [],
'NUM_AUT_AGE_18_24_DSM_V': [],
'NUM_AUT_AGE_24_34_DSM_V' :[],
'NUM_AUT_AGE_34_50_DSM_V' : [],
'NUM_AUT_AGE_gt50_DSM_V' : [],
'NUM_AUT_DSM_IV' : [],
'NUM_AUT_MALE_DSM_IV' : [],
'NUM_AUT_FEMALE_DSM_IV' : [],
'NUM_ASP_DSM_IV' : [],
'NUM_ASP_MALE_DSM_IV' : [],
'NUM_ASP_FEMALE_DSM_IV' : [],
'NUM_PDDNOS_DSM_IV' : [],
'NUM_PDDNOS_MALE_DSM_IV' : [],
'NUM_PDDNOS_FEMALE_DSM_IV' : [],
'NUM_ASP_PDDNOS_DSM_IV' : [],
'NUM_ASP_PDDNOS_MALE_DSM_IV' : [],
'NUM_ASP_PDDNOS_FEMALE_DSM_IV' : [],
'NUM_TD' : [],
'NUM_TD_MALE' : [],
'NUM_TD_FEMALE' : [],
'NUM_TD_AGE_lte12' : [],
'NUM_TD_AGE_12_18' : [],
'NUM_TD_AGE_18_24' : [],
'NUM_TD_AGE_24_34' : [],
'NUM_TD_AGE_34_50' : [],
'NUM_TD_AGE_gt50' : []
}) | pandas.DataFrame |
#!/usr/bin/env python
import sys
sys.path
sys.path.append('../')
from experiments.visualizations import newfig, savefig_and_close, \
plot_df_heatmap, render_mpl_table, \
export_df
import itertools
import os
from os import walk
import sys
from argparse import ArgumentParser
import json
import pandas as pd
import numpy as np
from scipy.stats import friedmanchisquare, wilcoxon, ranksums
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig_extension = 'svg'
def get_list_results_folders(folder, essentials=['description.txt'],
finished=None, return_unfinished=False):
list_results_folders = []
list_unfinished_folders = []
for root, subdirs, files in walk(folder, followlinks=True):
if set(essentials).issubset(set(files)):
if set(finished).issubset(set(files)):
list_results_folders.append(root)
elif return_unfinished:
list_unfinished_folders.append(root)
if return_unfinished:
return list_results_folders, list_unfinished_folders
return list_results_folders
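# The diary CSV handled below is assumed to be laid out as: columns 0-3 hold
# entry_n, subentry_n, date and elapsed time; from column 4 onward the columns
# alternate between a field name (even columns, read from the first row) and its
# value (odd columns); the name columns are dropped after renaming.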
def format_diary_df(df):
df[2] = pd.to_datetime(df[2])
df[3] = pd.to_timedelta(df[3], unit='s')
new_column_names = {0: 'entry_n', 1: 'subentry_n', 2: 'date', 3: 'time'}
for i in range(5, df.shape[1], 2):
        new_column_names[i] = df.iloc[0, i-1]
df.rename(columns=new_column_names, inplace=True)
df.drop(list(range(4, df.shape[1], 2)), axis=1, inplace=True)
return df
def get_dataframe_from_csv(folder, filename, keep_time=False):
filename = os.path.join(folder, filename)
df = pd.read_csv(filename, header=None, quotechar='|',
infer_datetime_format=True)
df = format_diary_df(df)
if keep_time:
to_drop = ['entry_n', 'subentry_n']
else:
to_drop = ['entry_n', 'subentry_n', 'date', 'time']
df.drop(to_drop, axis=1, inplace=True)
return df
def extract_summary(folder):
dataset_df = get_dataframe_from_csv(folder, 'dataset.csv', keep_time=True)
results_df = get_dataframe_from_csv(folder, 'training.csv',
keep_time=False)
best_epoch = results_df.groupby(as_index='pid', by='epoch')['val_y_loss'].mean().argmin()
# FIXME the best epoch could be computed for all the summaries later on
# However, it seems easier at this point
results_df['best_epoch'] = best_epoch
model_df = get_dataframe_from_csv(folder, 'model.csv', keep_time=False)
dataset_df['folder'] = folder
results_df['folder'] = folder
model_df['folder'] = folder
summary = pd.merge(results_df, dataset_df)
summary = | pd.merge(summary, model_df) | pandas.merge |
'''
Genetic algorithm to construct association rules,
optimizing for a binary outcome (relative risk)
'''
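# For reference, a minimal sketch of the fitness idea described above (the names and
# the boolean mask are hypothetical, not part of the class defined below): the
# relative risk compares the outcome rate inside a rule to the rate outside it.
#
#   def relative_risk(selected, y):
#       # selected: boolean mask of records matching the rule, y: 0/1 outcome series
#       return y[selected].mean() / y[~selected].mean()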
import pandas as pd
import numpy as np
import copy
import random
from datetime import datetime
from scipy.stats import norm
from evol import Population, Evolution
import itertools
import networkx as nx
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None  # matplotlib is unavailable; plotting will not work in this session
def feat_map(x):
fm = {}
for v in list(x):
# Get not missing
vnm = pd.unique(x.loc[x[v].notna(), v]).tolist()
fm[v] = vnm
return fm
def sel_rand(fm):
res_sel = []
for k,i in fm.items():
res_sel += [np.random.choice(i)]
return res_sel
def sel_any(fm, prob):
sel_var = sel_rand(fm)
var_zer = np.random.binomial(1,prob,len(sel_var))
res_li = []
for s,v in zip(sel_var,var_zer):
if v == 0:
res_li.append(np.nan)
else:
res_li.append(s)
return res_li
class genrules():
def __init__(self,data,y_var,x_vars,w_var=None,k=2,penrat=16,
pen_var=0.2,clip_val=1e-3,min_samp=50,mut_prob=0.5,
leader_tot=100,neg_fit=-5):
"""
generating initial object and attaching data
"""
self.y_var = y_var
self.x_vars = x_vars
self.tot_n = data.shape[0]
if w_var is None:
self.w_var = 'weight'
self.w = | pd.Series(1, index=data.index) | pandas.Series |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all points have been extracted; build subspaces around ten of them
        # check that each subspace is a Space and lies within s; extract a point set with 32 and check the count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
        # create a space, then specify a point in it and a distance to build a sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
print('Testing qt.time_string_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list date after "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the manually computed expected results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# Build a 500-point test series for testing the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
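# eval_fv() is checked against the final 'value' entry of each series
# (e.g. 6.39245474 is the last element of test_data1), i.e. the closing
# value of the simulated price curve; an empty DataFrame yields -np.inf.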
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
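# The assertions below read eval_max_drawdown()'s return value as a 4-tuple:
# (max drawdown ratio, peak index, valley index, recovery index), with the
# recovery index being NaN when the series never regains its previous peak.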
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_max_drawdown, 15)
self.assertRaises(KeyError,
eval_max_drawdown,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test the case where the series touches and crosses zero (max drawdown exceeds 1):
# TODO: investigate: how does the resulting divide-by-zero affect the result?
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
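# The conventional information ratio (mean active return over its standard
# deviation) is assumed for the expected values below; `portfolio` is an
# illustrative name, not part of the test:
#
#     active_ret = portfolio['value'].pct_change() - reference['value'].pct_change()
#     info_ratio = active_ret.mean() / active_ret.std()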
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on long data
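# The expected array below holds a 250-sample rolling volatility: the first
# 250 entries are NaN because the window is not yet full, leaving 250 valid
# values for the 500-point long_data series. A minimal sketch of a rolling,
# annualized volatility (an assumption, not the library's exact formula):
#
#     ret = self.long_data['value'].pct_change()
#     rolling_vol = ret.rolling(250).std() * np.sqrt(250)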
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test Sharpe ratio calculation on long data
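# As in the volatility test, the first 250 entries of the expected array are
# NaN because the rolling window is not yet full. A conventional Sharpe-ratio
# calculation is sketched here as an assumption (not necessarily eval_sharp's
# exact formula); 0.00035 is the risk-free rate passed in the call below:
#
#     ret = self.long_data['value'].pct_change()
#     sharpe = (ret.mean() - 0.00035) / ret.std()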
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on long data
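# The textbook beta (covariance of portfolio and benchmark returns divided by
# the variance of benchmark returns) is assumed for the expected rolling
# values below; the first 250 entries are NaN until the window fills up:
#
#     r_p = self.long_data['value'].pct_change()
#     r_b = self.long_bench['value'].pct_change()
#     beta = r_p.cov(r_b) / r_b.var()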
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on long data
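# A CAPM-style reading of alpha (excess return beyond what beta predicts) is
# assumed for the expected values below; the exact formula and the role of
# the second argument (100 here, 5 in the short-data calls above) are left
# to eval_alpha's implementation:
#
#     alpha = (portfolio_return - rf) - beta * (benchmark_return - rf)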
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
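# Every call below is expected to return the same pair (0.19509091,
# 0.929154957) regardless of the first argument, because the total and
# annualized return appear to be computed from the reference series:
# 6.39245474 / 5.34892759 - 1 ≈ 0.19509091, the total return of test_data1.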
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
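# self.rate models a frictionless back-test (all fees and slippage zero),
# while self.rate2 adds minimum transaction fees (buy_min=10, sell_min=5) so
# the loop can also be checked with fixed trading costs applied.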
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
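# Expected loop results. Each row appears to list the holdings of the seven
# shares, then cash, an (always zero here) fee column and the total value;
# e.g. in row 0 the 0.25 signal on share5 spends 2500 of the initial 10000
# cash on 555.556 units at 4.50, leaving 7500 cash and a 10000 total.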
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
def test_loop_step(self):
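# Step the core loop one day at a time with _loop_step and check cash, share
# holdings and total portfolio value against hand-computed figures for selected days.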
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
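# Run apply_loop over the whole signal list and compare the resulting DataFrame with
# the precomputed self.res; incompatible moq_buy/moq_sell combinations must raise
# AssertionError, and a second run uses the fixed-minimum-fee cost (rate2) with moqs.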
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
self.assertIs(np.allclose(res, target), True, 'sum of all elements is 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
self.assertIs(np.allclose(res, target), True, 'sum of all elements is 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
print(f'Test B: multi mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
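# hist_data columns follow data_types ('close, open, high, low'); take the mean of
# the four prices, smooth it with an N-day simple moving average, and go long (1)
# when the latest SMA value is at or above the price threshold, otherwise short (0).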
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
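# For each share: average of all prices in the sample window, day-over-day change of
# the close price (column 2 of 'high, low, close'), then pick the two shares with the
# largest change-to-average ratio and give each a 50% weight.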
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
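# Variant of TestSelStrategy using data_types 'close, low, open' and weekly sampling:
# ranks the shares by the latest close-price change relative to their average price
# and is intended to assign a 0.5 weight to two of them.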
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
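# With data_types 'close, open, high, low', ratio is the candle body-to-range
# proportion |close - open| / |high - low| and diff is the day-over-day change in
# close; emit +1 (buy) for a doji with diff > price1, -1 (sell) for a doji with
# diff < price2, and 0 otherwise.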
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing Operator object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_types = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_types,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
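# prepare_data should slice the history panel into per-strategy data sets according
# to each strategy's data_types and window_length, and reject cash plans or panels
# that do not fit (empty panel, investment dates outside the data range or on
# non-trading days, wrong number of shares or data types).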
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
# calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
# after removing the signal de-duplication code, the signal count grows from the original 23 to 26, including three duplicate signals;
# removing duplicates may also drop signals that should be kept; see the comment around line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
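# Generate the long/short mask with TestLSStrategy over the 50-day test panel; the
# first 5 days are consumed by the rolling window, leaving a (45, 3) mask.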
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unintended consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
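# Check the period segmentation produced by the '10d' sample frequency first, then the
# (45, 3) even-weight selection mask generated by TestSelStrategy.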
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
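# TestSigStrategy produces one +1/-1/0 signal row per day; compare the (45, 3)
# output with the expected signal matrix line by line.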
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor with proportion weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weighting, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\nOutput all history data of the close htype\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\nOutput all history data of the close and open htypes\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\nOutput history data of all htypes for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\nOutput all history data of every share for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\nOutput all history data of the close and high htypes\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\nOutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\nOutput all history data of the close and high htypes\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\nOutput all history data of the three htypes from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\nOutput all history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\nOutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\nOutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\nOutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\nOutput history data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\nOutput history data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\nOutput close and open history data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\nOutput close and open history data of shares 000100 and 000102, selected with a comma-separated string\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this needs to be strengthened -- use concrete examples to confirm that hp_join produces correct results,
# TODO: especially for different shares, htypes and hdates, and whether they still combine
# TODO: correctly when the labels are given in different orders (see the commented sketch at the end of this test)
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
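# Sketch of the stronger check the TODOs above ask for (left as comments, since the
# exact alignment rules of join() are not pinned down by this test yet): build two
# small HistoryPanels whose shares/htypes/hdates overlap but are supplied in different
# orders, join them, and assert that every value lands under the right labels, e.g.
#     joined = hp_a.join(hp_b)
#     self.assertTrue(np.allclose(joined[:, '000100'], expected_000100, equal_nan=True))
# where hp_a, hp_b and expected_000100 are small hand-built fixtures.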
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=qt.str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=qt.str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, qt.str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, qt.str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with htype not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'Raises KeyError when both parameters or neither parameter is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert historypanel slice by htype ')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
print(self.hp)
new_values = self.hp.values.astype(float)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
new_values, equal_nan=True))
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multiple threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# confirm that every returned item is a DataFrame
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any of the returned data is empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and consistently ordered; skip the check if the data came back empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# confirm that every returned item is a DataFrame
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any of the returned data is empty -- network problems may result in empty data
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and consistently ordered; skip the check if the data came back empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(qt.list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(qt.list_or_slice('open', str_dict), [1])
self.assertEqual(list(qt.list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(qt.list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(qt.list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(qt.list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(qt.list_or_slice(0, str_dict)), [0])
self.assertEqual(list(qt.list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(qt.list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_label_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(qt.labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(qt.labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(qt.labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(qt.labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_next_trade_day(self):
""" test the function next_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
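# A sketch of the remaining assertions, mirroring test_prev_trade_day() above (the
# expected results for the edge-case dates are assumptions here, so they are left
# commented out rather than asserted):
#     self.assertEqual(pd.to_datetime(next_trade_day(date_trade)), pd.to_datetime(date_trade))
#     self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)), pd.to_datetime(next_holiday))
#     self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)), pd.to_datetime(next_weekend))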
"""
Compare USGS events to events in the local catalogue:
1) do the big events match?
2) if they match, check how closely the locations and origin times agree
3) if they don't match, figure out why they were missed
I can see the future: I have to write the association script myself
"""
import pandas as pd
import numpy as np
from obspy.geodetics import gps2dist_azimuth
def parse_station_info(input_file):
# 'reusing functions is bad practice' yes haha
station_info = {}
with open(input_file, 'r') as f:
for line in f:
#print(line)
try:
sta, lon, lat = [x for x in line.strip().split("\t") if x != ""]
except:
sta, lon, lat = [x for x in line.strip().split(" ") if x != ""]
station_info[sta] = {"lon": float(lon), "lat": float(lat)}
return station_info
def dx(X1, X2):
"""
takes in two coordinate tuples (lon, lat), (lon, lat) returning their distance in kilometres
gps2dist_azimuth also returns the azimuth but i guess i don't need that yet
it also returns distances in metres so i just divide by 1000
the order doesn't matter
:param X1: first (lon, lat) coordinate tuple
:type X1: tuple
:param X2: second (lon, lat) coordinate tuple
:type X2: tuple
"""
#print(X1, X2)
return gps2dist_azimuth(X1[1], X1[0], X2[1], X2[0])[0] / 1000
def ip(X, Y):
if len(X) == 3:
# arithmetic average of in between gradients to approximate gradient at midpoint
return 0.5 * ((Y[2] - Y[1])/(X[2] - X[1]) + (Y[1] - Y[0])/(X[1] - X[0]))
if len(X) == 2:
return (Y[1] - Y[0])/(X[1] - X[0])
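# Quick illustrative check of ip() with made-up numbers: on the straight line y = 2x the
# midpoint-gradient approximation recovers the slope exactly, for both the three-point
# and the two-point branch.
assert abs(ip([0.0, 1.0, 2.0], [0.0, 2.0, 4.0]) - 2.0) < 1e-9
assert abs(ip([0.0, 1.0], [0.0, 2.0]) - 2.0) < 1e-9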
station_info = parse_station_info('../new_station_info.dat')
# usgs catalogue
udf = pd.read_csv("usgs_aceh.csv")
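# Sketch of the association step promised in the module docstring. This is an assumed
# implementation, not part of the original script: the local catalogue DataFrame, its
# column names ('time', 'longitude', 'latitude', 'id') and the 50 km / 30 s windows are
# placeholders; USGS exports normally carry 'time', 'latitude', 'longitude' and 'id'.
def associate_events(usgs_df, local_df, max_km=50.0, max_dt_s=30.0):
    """Pair each USGS event with local events within the distance/time windows."""
    pairs = []
    for _, u in usgs_df.iterrows():
        u_time = pd.to_datetime(u["time"])
        for _, c in local_df.iterrows():
            dt_s = abs((pd.to_datetime(c["time"]) - u_time).total_seconds())
            dist_km = dx((u["longitude"], u["latitude"]), (c["longitude"], c["latitude"]))
            if dt_s <= max_dt_s and dist_km <= max_km:
                pairs.append((u["id"], c["id"], dist_km, dt_s))
    return pd.DataFrame(pairs, columns=["usgs_id", "local_id", "dist_km", "dt_s"])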
import numpy as np
import pandas as pd
def unprocessed(csv_file):
df = pd.read_csv(csv_file)
return df
def replace_characters(main_string, chars, new_string):
for char in chars:
try :
if char in main_string:
main_string = main_string.replace(char, new_string)
except:
continue
return main_string
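# Illustration with a made-up string: replace_characters("$1,250.00", ["$", ","], "")
# returns "1250.00", which is what lets loadfinal() below cast the 'price' column to float.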
def loadfinal(csv_file):
df = pd.read_csv(csv_file)
df1=(df[["id", 'name', 'host_name','price','accommodates', 'review_scores_rating','review_scores_value']]
.sort_values("accommodates",ascending=False, ignore_index=True)
)
df1['price'] = df1['price'].apply(lambda x : replace_characters(x, ['$', ','], '')).astype(float)
return df1
def loadall(csv_file):
df = pd.read_csv(csv_file)
return df
from subprocess import (Popen, PIPE)
import os
import csv
import json
from .ts import (convert_datetime, date_index)
import pandas as pd
from sensible.loginit import logger
log = logger(__name__)
#Git Globals
GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date', 'message']
GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%ad', '%s']
GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
GIT_LOG_CMD = 'git log --no-merges --date=local --format="%s"' % GIT_LOG_FORMAT
GIT_UNIQUE_CMD = "git remote -v"
GIT_REPO_NAME = """basename `git rev-parse --show-toplevel`"""
GIT_CSV_COLUMN_NAMES = ["date","author_email", "author_name",
"id", "message", "repo"]
def df_from_csv(path):
df = pd.read_csv(path)
return df
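# Sketch (an assumption, not part of the original module) of how the output of
# GIT_LOG_CMD would typically be parsed into a DataFrame: %x1e separates commits,
# %x1f separates the fields listed in GIT_COMMIT_FIELDS.
def log_to_df(raw_log):
    records = [rec.strip() for rec in raw_log.strip().split("\x1e") if rec.strip()]
    rows = [dict(zip(GIT_COMMIT_FIELDS, rec.split("\x1f"))) for rec in records]
    df = pd.DataFrame(rows, columns=GIT_COMMIT_FIELDS)
    df["date"] = pd.to_datetime(df["date"])
    return df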
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import math
import time
import ruptures as rpt
from datetime import datetime
data = pd.read_csv("test_anom2.csv")
print(data.head())
data.set_index(np.arange(len(data.index)), inplace=True)
def check_if_shift_v0(data, column_name, start_index, end_index, check_period):
""" using median to see if it changes significantly in shift """
period_before = data[column_name][start_index - check_period: start_index]
period_in_the_middle = data[column_name][start_index:end_index]
period_after = data[column_name][end_index: end_index + check_period]
period_before_median = abs(np.nanmedian(period_before))
period_in_the_middle_median = abs(np.nanmedian(period_in_the_middle))
period_after_median = abs(np.nanmedian(period_after))
upper_threshold = period_in_the_middle_median * 2
down_threshold = period_in_the_middle_median / 2
if (upper_threshold < period_before_median and upper_threshold < period_after_median) or\
(down_threshold > period_before_median and down_threshold > period_after_median):
return True
else:
return False
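# Illustrative call on synthetic data (not on test_anom2.csv): a segment whose level
# jumps from ~1 to ~10 and back again should be flagged as a shift.
_shift_demo = pd.DataFrame({"value": [1.0] * 20 + [10.0] * 20 + [1.0] * 20})
assert check_if_shift_v0(_shift_demo, "value", 20, 40, 10)
del _shift_demo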
def prepare_data_to_test(data, data_name: str):
""" datetime type """
data["time"] = pd.to_datetime(data.time)
""" sort values """
data.sort_values(by=['time'], inplace=True)
""" set index """
data.set_index("time", inplace=True)
""" drop duplicate time"""
data = data[~data.index.duplicated(keep='first')]
""" resample """
data = data.resample('5T').pad()
""" reset index """
data.reset_index("time", inplace=True)
""" rename column names """
data.columns = ['time', data_name]
data.drop_duplicates(subset="time", inplace=True)
return data
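# Hedged usage note: prepare_data_to_test() expects a two-column frame (a 'time'
# column plus one value column), because it renames the result to ['time', data_name].
# The column name below is an assumption -- the schema of test_anom2.csv is not shown:
#     data = prepare_data_to_test(data[["time", "value"]], "value")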
def prepare_data_dp(data, column_to_fix):
data['time'] = pd.to_datetime(data['time'])
data['hour'] = pd.to_datetime(data.time, unit='m')
import xarray as _xr
import pandas as _pd
import numpy as _np
import pathlib as _pl
import traceback as _tb
import datetime as _dt
from email.mime.text import MIMEText as _MIMEText
import smtplib as _smtplib
import pathlib as __pl
import configparser as _cp
import magic as _magic
settings = """
[notify]
email_address = None
smtp = localhost
"""
def generate_config(p2sf):
if not p2sf.parent.is_dir():
p2sf.parent.mkdir()
with open(p2sf, 'w') as raus:
raus.write(settings)
def load_config():
p2sf = __pl.Path.home().joinpath('.ceilopy/config.ini')
if not p2sf.is_file():
generate_config(p2sf)
config = _cp.ConfigParser()
config.read(p2sf)
return config
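# Hedged usage sketch: downstream code would read the notification settings roughly as
#     config = load_config()
#     address = config['notify']['email_address']
#     smtp_host = config['notify']['smtp']
# (how the address and SMTP host are used to send notifications is not shown here).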
class CorruptFileError(Exception):
"""Exception raised when File is not whats expected.
"""
def __init__(self, message):
super().__init__(message)
class MissingSerialNumberError(Exception):
"""Exception raised when File does not contain Serial number.
"""
def __init__(self, message):
super().__init__(message)
class SerialNumberMissmatchError(Exception):
"""Exception raised when Files doe not have the same serial number.
"""
def __init__(self, message):
super().__init__(message)
def read_L1(file, parent = None):
if isinstance(file, (str, _pl.Path)):
file = [file]
assert(isinstance(file, (_pd.Series, list, _np.ndarray))), f'File type not recognized: {type(file)}'
ignore1 = ['name','message_type','version','date_stamp',
'period','tilt_angle',
'cloud_status','cloud_data','status_bits','profile_scale',
'profile_resolution','profile_length']
if not _np.all([_magic.from_file(fn.as_posix()) == 'Hierarchical Data Format (version 5) data' for fn in file]):
fnc = '\n\t'.join([fn.as_posix() for fn in file])
raise CorruptFileError(f'At least one of the following can not be identified as a netcdf file: \n\t {fnc}')
L1 = _xr.open_mfdataset(file, concat_dim = 'timeDim', drop_variables=ignore1)
L1 = L1.assign_coords(time = _pd.to_datetime(L1.time.values, unit = 's'))
for var in L1.variables:
if 'timeDim' in L1[var].dims:
L1[var] = L1[var].swap_dims({'timeDim':'time'})
return L1
# read hist file
##### Read Level3 hist files. #############################################
def read_level3_hist(file, parent = None):
def read_file(fn):
cols = ['CREATEDATE',' CEILOMETER',' CLOUD_STATUS',' CLOUD_1',' CLOUD_2',
' CLOUD_3'] # What columns to keep.
his3 = _pd.read_csv(fn, skiprows=1, header=0, sep=',',
na_values='-9999', index_col=0, parse_dates=True,
infer_datetime_format=True, usecols=cols)
his3.index.rename('time', inplace=True)
his3.columns = [col.strip() for col in his3.columns]
return his3
if isinstance(file, (str, _pl.Path)):
file = [file]
assert(isinstance(file, (_pd.Series, list, _np.ndarray))), f'File type not recognized: {type(file)}'
df = _pd.concat([read_file(fn) for fn in file], sort = True)
#### testpoint
parent.tp_dfcc = df.copy()
# assert(df.index.duplicated().sum() == 0), 'There are duplicates in the hist file ... I would think this should not be happening. if it does un-comment the following line'
df = df[~df.index.duplicated(keep='first')] # Remove duplicates
return df
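# Hedged sketch (illustrative only): the `~df.index.duplicated(keep='first')`
# idiom used in read_level3_hist keeps the first row for each repeated
# timestamp; the toy column name is an assumption.
def _example_drop_duplicate_timestamps():
    idx = _pd.to_datetime(["2021-01-01 00:00", "2021-01-01 00:00", "2021-01-01 00:01"])
    df = _pd.DataFrame({"CLOUD_1": [100, 120, 130]}, index=idx)
    return df[~df.index.duplicated(keep="first")]  # 00:00 appears only once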
class Cl51CloudProdRetriever():
def __init__(self, poutg,
# check_serial = True,
):
self.poutg = poutg
# self.p2fnout = poutg.path2fn_out.unique()[0]
self._product_dataset = None
self.get_serial_numbers()
# if check_serial:
# self.check_serial()
def get_serial_numbers(self):
def get_serial(row):
# Extract serial numbers from files
key = row.file_type
file = row.path2raw
if key in ['L1', 'L2', 'bl']:
serial = file.name[-11:-3] # Extract serial number from L1 filename.
# elif key == 'L3':
# serial = files['L3'][-11:-3]
elif key in ['H2','H3','hist']:
h = _pd.read_csv(file, skiprows=1, header=0, sep=',')
serial = h[' CEILOMETER'][0].strip() # Extract serial number from H2 file.
else:
raise KeyError('File type unknown')
return serial
self.poutg['sn'] = self.poutg.apply(get_serial, axis = 1)
def check_serial(self, error_handling = 'raise'):
"""
        Checks if the serial numbers in all the files are the same. In early
        measurements the serial number was not stored ... use error_handling to
        deal with occurring errors.
Parameters
----------
error_handling : str, optional
How to deal with errors. The default is 'raise'.
            raise: raises occurring errors
allow_empty: do not raise an error if serial number is not available
        Raises
        ------
        MissingSerialNumberError
            If at least one file is missing a serial number (unless 'allow_empty').
        SerialNumberMissmatchError
            If the files do not all carry the same serial number.
        Returns
        -------
        None
"""
sn_series = self.poutg['sn'].copy()
# self.poutg['sn'] = sn_series.copy()
valid = ['raise', 'allow_empty']
        assert(error_handling in valid), f'error_handling got an unexpected value ({error_handling}). Choose from: {valid}'
if error_handling == 'allow_empty':
sn_series = sn_series[sn_series.apply(lambda x: len(x)) != 0]
if sn_series.unique().shape[0] != 1:
if len(sn_series[sn_series.apply(lambda x: len(x)) != 0]) != len(sn_series):
fnj = '\n\t'.join([fn.as_posix() for fn in self.poutg.path2raw])
raise MissingSerialNumberError(f'At least one of the following files is missing a serial number:\n\t{fnj}')
raise SerialNumberMissmatchError(f'Serial numbers ({sn_series.unique()}) do not match')
@property
def product_dataset(self):
if isinstance(self._product_dataset, type(None)):
poutg = self.poutg
L1 = read_L1(poutg[poutg.file_type == 'bl'].path2raw)
dfL1 = L1.rcs_910.to_pandas()
            assert(dfL1.index.duplicated().sum() == 0), "there are duplicates in L1's index, I would think this should not be happening. if it does un-comment the following line"
# dfL1 = dfL1[~dfL1.index.duplicated(keep='first')]
his3 = read_level3_hist(poutg[poutg.file_type == 'hist'].path2raw, parent = self)
##### Clean and resample to 36s ###########################################
# resample to 36s even though L1 files are already at 36 sec because the
# time intervals on the L1 files from BL-View are sometimes off by a second.
# Keeping the limit at 1 step prevents the resample from repeating a nearby
# data point to fill in large gaps in the data.
dfL1 = dfL1.resample('36S').nearest(limit=1)
# The .his files are originally at 16 sec.
his3 = his3.resample('36S').nearest(limit=1)
            # Do this to fill in any gaps in the data with nans to build complete days.
            # Create a date range of a complete day with 36s intervals with no gaps.
day = _pd.date_range(dfL1.index[0].floor('D'), dfL1.index[-1].ceil('D'),freq='36S')
df = | _pd.DataFrame(index=day[:-1]) | pandas.DataFrame |
import seaborn as sns
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from sklearn.linear_model import LinearRegression
#load social distance and prep for join
social_dist_county_df = pd.read_pickle('data/social_dist_county_df.pkl').reset_index()
social_dist_county_df['date'] = pd.to_datetime(social_dist_county_df['date_range_start'], utc=True).dt.date
social_dist_county_df = social_dist_county_df.set_index(['county_fips', 'date'])
social_dist_county_df['median_home_dwell_time_pct'] = social_dist_county_df['weighted_median_home_dwell_time'] / (24*60)
#load epidemiology and prep for join
epidemiology_df = pd.read_pickle('data/us_data_with_latent_populations.pkl').reset_index()
epidemiology_df['county_fips'] = epidemiology_df['UID'].apply(lambda x: x[3:])
epidemiology_df = epidemiology_df.set_index(['county_fips', 'date'])
epidemiology_df['infections_as_ratio_of_case_based_infectious_population'] = epidemiology_df['infections_based_on_cases'] / epidemiology_df[
'case_based_infectious_population']
#check index overlap
epidemiology_df.index.difference(social_dist_county_df.index)
sd_epi_df = epidemiology_df.join(social_dist_county_df, how='inner')
#sum(sd_epi_df['pct_staying_home'].isna())
#sum(sd_epi_df['infections_as_ratio_of_case_based_infectious_population'].isna())
with pd.option_context('mode.use_inf_as_na', True):
#sd_epi_df = sd_epi_df.dropna(subset=['infections_as_ratio_of_case_based_infectious_population'])
data_filter = np.all(np.array([
sd_epi_df['infections_as_ratio_of_case_based_infectious_population'].notna().values,
sd_epi_df['infections_as_ratio_of_case_based_infectious_population'] < 2,
sd_epi_df['case_based_infectious_population'] > 20,
        sd_epi_df.reset_index()['date'] < pd.Timestamp.today() - pd.Timedelta(10, 'days')
]), axis=0)
data = sd_epi_df[data_filter].copy()
sns.kdeplot(data=data['pct_staying_home'], data2=data['infections_as_ratio_of_case_based_infectious_population'])
sns.regplot(data=data, x='pct_staying_home', y='infections_as_ratio_of_case_based_infectious_population')
X = data['pct_staying_home'].values.reshape(-1,1)
y = data['infections_as_ratio_of_case_based_infectious_population']
reg = LinearRegression().fit(X,y)
print(reg.coef_)
#Look at dwell time instead
sns.kdeplot(data=data['median_home_dwell_time_pct'], data2=data['infections_as_ratio_of_case_based_infectious_population'])
sns.regplot(data=data, x='median_home_dwell_time_pct', y='infections_as_ratio_of_case_based_infectious_population')
X = data['median_home_dwell_time_pct'].values.reshape(-1,1)
y = data['infections_as_ratio_of_case_based_infectious_population']
reg = LinearRegression().fit(X,y)
print(reg.coef_)
#Looking over time at aggregate level
data['n_home'] = data['pct_staying_home'] * data['population']
us_aggregate = data.groupby('date').aggregate(
infections_based_on_cases = ("infections_based_on_cases", sum),
case_based_infectious_population = ('case_based_infectious_population', sum),
population = ('population',sum),
n_home = ('n_home',sum)
).reset_index()
us_aggregate['infections_as_ratio_of_case_based_infectious_population'] = us_aggregate['infections_based_on_cases'] / us_aggregate[
'case_based_infectious_population']
us_aggregate['pct_staying_home'] = us_aggregate['n_home']/us_aggregate['population']
fig, ax = plt.subplots()
ax = sns.lineplot(data=us_aggregate, x='date', y='infections_as_ratio_of_case_based_infectious_population', ax=ax)
ax2 = sns.lineplot(data=us_aggregate, x='date', y='pct_staying_home')
locator = mdates.AutoDateLocator(minticks=12, maxticks=18)
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
fig.show()
sns.set(rc={'figure.figsize': (20, 8.27)})
#Looking over time at top 50 counties
top_50 = sd_epi_df['new_cases'].groupby('county_fips').sum().sort_values()[-50:].index
with pd.option_context('mode.use_inf_as_na', True):
#sd_epi_df = sd_epi_df.dropna(subset=['infections_as_ratio_of_case_based_infectious_population'])
data_filter = np.all(np.array([
sd_epi_df['infections_as_ratio_of_case_based_infectious_population'].notna().values,
        sd_epi_df.reset_index()['date'] < pd.Timestamp.today() - pd.Timedelta(10, 'days')
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
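# Hedged sketch (not one of the original tests): the expected frames above use
# pandas nullable dtypes ("Int64", "boolean") so SQL NULLs round-trip as pd.NA
# and still compare equal under assert_frame_equal. Values here are made up.
def _example_nullable_expected_frame():
    got = pd.DataFrame({
        "test_nullint": pd.Series([3, None], dtype="Int64"),
        "test_bool": pd.Series([True, None], dtype="boolean"),
    })
    expected = pd.DataFrame({
        "test_nullint": pd.Series([3, pd.NA], dtype="Int64"),
        "test_bool": pd.Series([True, pd.NA], dtype="boolean"),
    })
    assert_frame_equal(got, expected, check_names=True)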
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": | pd.Series([0, 4], dtype="int64") | pandas.Series |
# Copyright 2020 Juneau
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base class for all search functionalities in Juneau.
For instance, `WithProv` inherits from this class and specifies one
type of search functionality.
"""
import copy
import logging
import os
import pickle
import sys
from abc import abstractmethod
import numpy as np
import pandas as pd
from juneau.db.table_db import (
connect2db_engine,
connect2gdb,
fetch_all_table_names,
fetch_all_views,
)
from juneau.config import config
class SearchTables:
query = None
eng = None
geng = None
all_tables_read = {}
def __init__(self, schema, read_flag = True):
self.query = None
self.eng = connect2db_engine(config.sql.dbname)
self.geng = connect2gdb()
conn = self.eng.connect()
if schema:
logging.info("Indexing existing tables from data lake")
self.tables = fetch_all_table_names(schema, conn)
if read_flag:
count = 0
for i in self.tables:
try:
table_r = pd.read_sql_table(i, conn, schema=schema)
if 'Unnamed: 0' in table_r.columns:
table_r.drop(['Unnamed: 0'], axis=1, inplace=True)
self.all_tables_read[i] = table_r
count = count + 1
if count % 20 == 0:
logging.info("Indexed " + str(count) + " tables...")
except KeyboardInterrupt:
return
except ValueError:
logging.info("Value error, skipping table " + i)
continue
except TypeError:
logging.info("Type error, skipping table " + i)
continue
except:
logging.info("Error, skipping table " + i)
logging.error("Unexpected error:", sys.exc_info()[0])
continue
else:
logging.info("Indexing views from data lake")
self.tables = fetch_all_views(conn) # self.eng)
count = 0
for i in self.tables:
try:
table_r = pd.read_sql_table(i, conn) # self.eng)
self.all_tables_read[i] = table_r
count = count + 1
if count % 20 == 0:
logging.info("Indexed " + str(count) + " tables...")
except ValueError:
logging.info("Error, skipping table " + i)
continue
except TypeError:
logging.info("Type error, skipping table " + i)
continue
except KeyboardInterrupt:
return
except:
logging.info("Error, skipping table " + i)
logging.error("Unexpected error:", sys.exc_info()[0])
continue
conn.close()
logging.info(
"%s tables detected in the database." % len(self.tables)
)
logging.info(
"%s tables loaded from the database." % len(self.all_tables_read.items())
)
#self.init_schema_mapping()
@staticmethod
def line2cid(directory):
nb_l2c = {}
files = os.listdir(directory)
for f in files:
ind = pickle.load(open(os.path.join(directory, f), "rb"))
nb_l2c[f[:-2]] = ind
return nb_l2c
@staticmethod
def col_similarity(tableA, tableB, SM, key_factor):
col_sim_upper = 1 + float(len(SM.keys()) - 1) * float(key_factor)
tableA_not_in_tableB = []
for kyA in tableA.columns.tolist():
if kyA not in SM:
tableA_not_in_tableB.append(kyA)
col_sim_lower = len(tableB.columns.values) + len(tableA_not_in_tableB)
col_sim = float(col_sim_upper) / float(col_sim_lower)
return col_sim
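    # Hedged usage sketch (illustrative only): SM maps columns of tableA onto
    # columns of tableB; with one of tableA's two columns mapped and
    # key_factor=1.0 the similarity is 1 / (2 + 1) = 1/3. Toy tables only.
    @staticmethod
    def _example_col_similarity():
        tableA = pd.DataFrame({"id": [1, 2], "price": [3.0, 4.0]})
        tableB = pd.DataFrame({"id": [1, 2], "qty": [5, 6]})
        return SearchTables.col_similarity(tableA, tableB, SM={"id": "id"}, key_factor=1.0)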
@staticmethod
def row_similarity(colA, colB):
colA_value = colA[~pd.isnull(colA)].values
        colB_value = colB[~pd.isnull(colB)].values
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automated code, needs some hands-on work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
2. each room has different window, door, ac, indoor, outdoor info
3. I processed building A to F by hand, then figured out that I can rename the files first, then use code to process
4. rename the file by type and number, such as window1, indoor1, ac1, door1, etc.
5. code automated G, H, I
6. the folder has multiple types of data, csv and xlsx, figure out the file type, then read into pandas
7. concat the outdoor datetime and temperature with ac data, then judge if the ac is on or off
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files)) # filter out the door status files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manual processed data '''
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
print(f'Reading the data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
# combine
indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
# read anc combine the files under this folder
if indoor_files: # make sure it is not empty
indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
else:
pass
if window_files:
window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
# print(combined_hvac.isnull().sum())
# print(index)
else:
pass
if door_files:
door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
# print(combined_door.isnull().sum())
# print(index)
else:
pass
''' auto mated process by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
'''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
name, extension = os.path.splitext(window_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
name, extension = os.path.splitext(door_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
else:
temp_df = pd.read_excel(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status']
temp_df['Door_ID'] = int(name.split('_')[0][4:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_door = pd.concat([combined_door, temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
for hvac_name in hvac_files:
name, extension = os.path.splitext(hvac_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
else:
temp_df = pd.read_excel(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
temp_df['HVAC_Zone_ID'] = int(name.split('_')[0][2:]) # get the number of ac
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_hvac = pd.concat([combined_hvac, temp_df], ignore_index=True) # concat the data
else:
pass
# drop na rows when specific column is null
combined_indoor = combined_indoor[combined_indoor['Date_Time'].notnull()]
combined_outdoor = combined_outdoor[combined_outdoor['Date_Time'].notnull()]
combined_window = combined_window[combined_window['Date_Time'].notnull()]
combined_door = combined_door[combined_door['Date_Time'].notnull()]
combined_hvac = combined_hvac[combined_hvac['Date_Time'].notnull()]
# process windows, door open/close data
combined_door['Door_Status'] = combined_door['Door_Status'].replace([0, 1, 2], [1, 0, 0])
combined_window['Window_Status'] = combined_window['Window_Status'].replace([0, 1, 2], [1, 0, 0])
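# Hedged sketch (illustrative only): the replace() calls above recode the raw
# logger states into a binary flag (raw 0 -> 1, raw 1 and 2 -> 0); whether 1
# means "open" here is an assumption about the logger convention.
def _example_recode_status():
    raw = pd.Series([0, 1, 2, 0])
    return raw.replace([0, 1, 2], [1, 0, 0])  # -> 1, 0, 0, 1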
# format datetime
print("Formatting datetime!")
combined_indoor['Date_Time'] = pd.to_datetime(combined_indoor['Date_Time'], format='%m/%d/%Y %H:%M')
import os
import re
import time
import pandas as pd
from urllib import request
from bs4 import BeautifulSoup as BS
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import TimeoutException, NoSuchElementException
POSITIONS = ['Data Scientist']
CHROME_DRIVER_PATH = os.getcwd() + '/chromedriver'
PAGE_LOAD = 30
MAX_PAGES = 2
def login(driver):
'''Load Glassdoor and then wait for login'''
driver.get('https://www.glassdoor.com/index.htm')
while True:
try:
WebDriverWait(driver, 1).until(expected_conditions.url_contains("member"))
except TimeoutException:
break
return True
def search_for_position(driver):
time.sleep(PAGE_LOAD)
try:
position_to_search_for = driver.find_element_by_xpath("//*[@id='sc.keyword']")
position_to_search_for.send_keys(POSITIONS[0])
driver.find_element_by_xpath(" //*[@id='scBar']/div/button").click()
return True
except NoSuchElementException:
return False
def clean_links(filtered_links):
cleaned_links = []
for link in filtered_links:
# Correct the address
link = link.replace("GD_JOB_AD", "GD_JOB_VIEW")
if link[0] == '/':
link = f"https://www.glassdoor.com{link}"
# Open the link and get the redirected URL
a_request = request.Request(link, None, {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0'})
try:
response = request.urlopen(a_request)
redirect = response.geturl()
# Skip Easy Applies
if "glassdoor" not in redirect:
cleaned_links.append(redirect)
except Exception:
pass
return set(cleaned_links)
def get_links(driver):
'''Get all the job links, excluding easy apply and errors'''
time.sleep(PAGE_LOAD)
# Use BS to sift throught the HTML
page_source = driver.page_source
soup = BS(page_source, features='lxml')
job_links = soup.findAll("a", {"class": "jobLink"})
filtered_links = [job_link['href'] for job_link in job_links]
return filtered_links
def scrape_job_links():
'''Scrape and compile all job links'''
page = 1
all_links = set()
next_url = ''
driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)
if not (login(driver) and search_for_position(driver)):
driver.close()
while page < MAX_PAGES:
if page == 1:
print('Page Num:', page)
all_links.update(clean_links(get_links(driver)))
# Next page then update page and url
next_page = driver.find_element_by_xpath("//*[@id='FooterPageNav']/div/ul/li[3]/a")
this_page = next_page.get_attribute('href')
page_num = re.search('(?P<url>[^;]*?)(?P<page>.htm\?p=)(?P<pagenum>.)', this_page)
page += 1
next_url = f"{page_num.group('url')}_IP{page}.htm"
if page >=2 :
print('Page Num:', page)
driver.get(next_url)
all_links.update(clean_links(get_links(driver)))
# Next page then update page and url
page_num = re.search('(?P<url>[^;]*?)(?P<pagenum>.)(?P<html>.htm)', next_url)
page += 1
next_url = f"{page_num.group('url')}{page}.htm"
driver.close()
return all_links
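# Hedged sketch (illustrative only): the named-group regexes above rebuild the
# next-page URL from Glassdoor's pagination pattern; the sample URL below is an
# assumption about that pattern, not a captured address.
def _example_next_page_url(this_page="https://www.glassdoor.com/Job/data-scientist-jobs-SRCH_KO0,14.htm?p=1"):
    match = re.search(r'(?P<url>[^;]*?)(?P<page>.htm\?p=)(?P<pagenum>.)', this_page)
    return f"{match.group('url')}_IP2.htm"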
def links_to_csv():
csv_name = (POSITIONS[0] + '_jobs.csv').replace(' ', '_')
job_links = scrape_job_links()
print('Scrape Complete')
    df = pd.DataFrame(job_links)
from collections.abc import Mapping, Iterable
import os
import numpy as np
import warnings
import pandas as pd
from fmskill.comparison import ComparerCollection
import fmdap.diagnostic_output
from fmdap import pfs_helper as pfs
class DiagnosticCollection(Mapping):
@property
def names(self):
return list(self.diagnostics.keys())
@property
def df_attrs(self):
if self._df_attrs is None:
all_attrs = [self.diagnostics[n].attrs for n in self.names]
self._df_attrs = pd.DataFrame(all_attrs, index=self.names)
return self._df_attrs
def __init__(self, diagnostics=None, names=None, attrs=None):
self.diagnostics = {}
self._df_attrs = None
self._comparer = None
if diagnostics is not None:
self.add_diagnostics(diagnostics, names, attrs)
@classmethod
def from_pfs(cls, pfs_file, folder=None, types=[1, 2]):
df, DA_type = cls._parse_pfs(pfs_file, types)
df = cls._check_file_existance(df, folder)
dc = cls()
for _, row in df.iterrows():
name = row["name"] if (not pd.isnull(row["name"])) else None
attrs = row.dropna().to_dict()
attrs["DA_type"] = DA_type
attrs["pfs_file"] = pfs_file
dc._add_single_diagnostics(row["file_name"], name=name, attrs=attrs)
return dc
@classmethod
def _parse_pfs(cls, pfs_file, types=[1, 2]):
warnings.filterwarnings("ignore", message="Support for PFS files")
assert os.path.exists(pfs_file)
d = pfs.pfs2dict(pfs_file).get("DATA_ASSIMILATION_MODULE")
if d is None:
raise ValueError(
"'DATA_ASSIMILATION_MODULE' section could not be found in pfs file!"
)
DA_type = d.get("METHOD", {}).get("type", 0)
dfd = pfs.get_diagnostics_df(d)
if types is not None:
types = [types] if isinstance(types, int) else types
dfd = dfd[dfd.type.isin(types)]
if "include" not in dfd:
dfd["include"] = 1
else:
dfd.loc[dfd.include.isnull(), "include"] = 1
dfd.index.name = "output_id"
if dfd.type.isin([1]).any():
dfm = pfs.get_measurements_df(d)
if "include" not in dfm:
dfm["include"] = 1
else:
dfm.loc[dfm.include.isnull(), "include"] = 1
if DA_type == 0:
dfm["assimilated"] = False
else:
dfm["assimilated"] = dfm.include == 1
df = dfd.join(dfm, on="measurement_id", lsuffix="", rsuffix="_measurement")
else:
df = dfd
df = df[df.include == 1].drop(columns="include")
return df.dropna(axis=1, how="all"), DA_type
@classmethod
def _check_file_existance(cls, df, folder=None):
if folder is None:
folder = ""
file_name = np.array([os.path.join(folder, x) for x in df["file_name"]])
file_exists = np.array([os.path.exists(x) for x in file_name])
if "file_name_measurement" in df:
measurement_file_exists = np.array(
[
os.path.exists(os.path.join(folder, x))
for x in df["file_name_measurement"]
                    if (not pd.isnull(x))
#!/usr/bin/env python
# coding: utf-8
# In[113]:
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from datetime import datetime,timedelta
import sys
sys.path.insert(0, '../src')
from util import GetFileStatus,date_range
# # Generate State file with Vaccination
# In[2]:
def main():
path = "/home/swiadmin/Incovid19/rdf/"
df_state = pd.read_csv("/home/swiadmin/test/csv/latest/states.csv")
df_state["Date"] = pd.to_datetime(df_state["Date"])
    df_state_vacc = pd.read_csv("/home/swiadmin/test/csv/latest/cowin_vaccine_data_statewise.csv")
import pandas as pd
import numpy as np
import yaml
from zipfile import ZipFile
from biom.table import Table
from biom.util import biom_open
import copy
import plotly.graph_objects as go
"""
import argparse
parser = argparse.ArgumentParser(description='Preparing a barplot from phyloseq barplot data.')
parser.add_argument('--taxonomy','-t', dest='TAX', type=str, help='Taxonomy file.', required=True)
parser.add_argument('--feature','-f', dest='FEAT', type=str, help='Feature (OTU) file.', required=True)
parser.add_argument('--mapping','-m', dest='MAP', type=str, help='Mapping file.', required=True)
parser.add_argument('--taxonomySep','-ts', dest='TAX_SEP', type=str, help='Taxonomy file separator [class default is TAB].')
args = parser.parse_args()
########################################################################################################################
"""
class seqObject():
"""
This class represents a sequencing file object managing a mapping file, feature/otu file and taxonomy with some associated functions
"""
def __init__(self,
mappingFile=None,
taxonomyFile=None,
featureFile=None,
mappingSep = '\t',
taxonomySep = '\t',
featureSep = '\t',
sampleNamesColumn="Sample",
taxonomyDatabase=None,
representativeSeqFile=None,
featureFormat="automatic",
featureColumnName="None",
featureRowName="",
taxonomyFormat=None,
featureFilePickle=False,
matchFeaturesToTaxonomy=True,
threading=10):
"""
Creates and organizes class variables from the input files.
:param mappingFile: A file containing mapping file
:type mappingFile: str
:param taxonomyFile: A file containing the taxonomy with separate columns for taxonomic levels
:type taxonomyFile: str
:param featureFile: A file containing the feature/otu table with samples in columns and features/otus in rows
:type featureFile: str
:param mappingSep: Separator in the mapping file, default is '\t'
:type mappingSep: char
:param taxonomySep: Separator in the taxonomy file, default is '\t'
:type taxonomySep: char
:param featureSep: Separator in the feature/otu file, default is '\t'
:type featureSep: char
:param sampleNamesColumn: Name of the column containing sample names.
:type sampleNamesColumn: str
:param taxonomyDatabase: Selection of a database for a Silva taxonomy output (SILVA, RDP, GTDB, LTP, EMBL)
:type taxonomyDatabase: str
:param featureFilePickle: A flag determining whether the loaded feature file is in a pickle format for faster loading (default is False)
:type featureFilePickle: bool
"""
print("Initializing data loading.")
self.mapSampleNames = sampleNamesColumn
self.load_mapping(file=mappingFile, sep=mappingSep)
self.load_taxonomy(file=taxonomyFile, sep=taxonomySep, database=taxonomyDatabase, taxonomyFormat=taxonomyFormat)
self.load_feature(file=featureFile,
sep=featureSep,
format=featureFormat,
featureColumnName=featureColumnName,
representativeSeqFile=representativeSeqFile,
featureFilePickle=featureFilePickle,
threading=threading,
matchFeaturesToTaxonomy=matchFeaturesToTaxonomy)
def load_mapping(self, file, sep):
"""
Loading the mapping file and formatting it.
:param file: input mapping file
:type file: str
:param sep: separator character used in the mapping file
:type sep: char
:return: mapping file based dictionary
"""
print("Loading mapping file")
# Loading mapping file
#self.mapDict = {}
#with open(file, 'r') as f_map:
# Loading a header of the file
# self.mapHeader = f_map.readline().split(sep)
# self.mapHeader[-1] = self.mapHeader[-1].rstrip()
# Looking up a sample name column
#if self.sampleName in self.mapHeader:
# name_pos = self.mapHeader.index(self.sampleName)
#else:
# raise ValueError("Mapping file : Column '{0}' missing in mapping file. {0} is a mandatory field to identify sample names.".format(self.sampleName))
# Parsing the file into a dictionary
#for line in f_map:
# line = line.rstrip()
# line_list = line.split(sep)
# key = line_list[name_pos]
# line_list.pop(name_pos)
# self.mapDict[key] = line_list
# Removing the sample name from the mapping
#self.mapHeader.pop(name_pos)
self.df_map = pd.read_csv(file, sep=sep)
def load_taxonomy(self, file, sep, database, taxonomyFormat):
"""
Loading the taxonomy file and formatting it
:param file: Input taxonomy file.
:type file: str
:param sep: Separator character used in the taxonomy file.
:type sep: char
:return: taxonomy based dictionary
"""
print("Loading taxonomy")
# If file ending is .qza, extract taxonomic table from the file and proceed to treat it regularly
if file[-4:] == ".qza":
file = self._load_qiime_zip(file, "taxonomy.tsv")
self.taxDict = {}
# Switch cases functions for different inputs
def name_fun(header):
print("Recognizing a 'name' column.")
name_pos = header.index("Name")
format = "custom"
return name_pos, format
def qiime2_fun(header):
print("Recognizing a Qiime2 format.")
name_pos = header.index("Feature ID")
format = "Qiime2"
return name_pos, format
def mothur_fun(header):
print("Recognizing a mothur format.")
name_pos = 0
format = "mothur"
return name_pos, format
def silva_fun(header):
print("Recognizing a silva format.")
name_pos = 0
format = "silva"
return name_pos, format
def dada2_fun(header):
print("Recognizing a DADA2 format.")
name_pos = 0
format = "dada2"
return name_pos, format
# Guessing the format of the first cell that should contain the id od the taxonomy columns
def format_cases(argument):
switcher = {
"Name": name_fun,
"Feature ID": qiime2_fun,
"Otu1": mothur_fun,
'"job_id"': silva_fun,
'"name"' : silva_fun,
"name" : silva_fun,
'""' : dada2_fun
}
func = switcher.get(argument, lambda argument: (0, 0))
output = func(argument)
return output
with open(file, 'r') as f_tax:
# Loading a header of the file
self.taxHeader = f_tax.readline().split(sep)
self.taxHeader[-1] = self.taxHeader[-1].rstrip()
# Retrieving format of the data file and the position of the name column (just in case this is random)
name_pos, format = format_cases(self.taxHeader[0])
if taxonomyFormat:
format = taxonomyFormat
if format == 0:
raise ValueError("Taxonomy file : Unknown format of a taxonomy file. Please specify the taxonomy file format in 'taxonomyFormat'."
"Supported formats are: Qiime2, mothur, silva, dada2\n "
"Automatically recognized formats are: \n"
"Qiime2 format with a 'Feature ID' column and a 'Taxon' column. \n"
"Silva format with a 'name' column and taxonomy split in separate columns. \n"
"DADA2 format with an unnamed sample column and separate columns for each taxonomic category.")
## This should be in the new custom function
if format == "custom":
# Parsing the file into a dictionary
for line in f_tax:
line = line.rstrip()
line_list = line.split(sep)
key = line_list[name_pos]
line_list.pop(name_pos)
# Adding the OTU name at the end instead of the beginning of the list for the multiple index later
line_list.append(key)
self.taxDict[key] = line_list
# Removing the sample name from the mapping
self.taxHeader.pop(name_pos)
elif format == "seqDataClass":
self.df_tax = pd.read_csv(file, sep=sep)
def line_formatting(line):#, name_pos, taxonomy_pos):
line[1] = line[1].replace('"', '')
line_list = []
try:
for taxon in line[1].split(";"):
# Removing crazy irregular taxon levels that RDP got creative with. Thanks Obama.
if taxon[-4:] == "idae" or taxon[-5:] == "ineae":
#print(f"skipping {entry}")
continue
taxon = taxon.split('(')[0]
if len(taxon) != 0:
line_list.append(taxon)
except:
raise ValueError("It was not possible to process taxonomy line : {}. Exiting program. "
"Check the taxonomy file.".format(line))
if len(line_list) < 6:
filler = ["Unclassified"]*(6 - len(line_list))
line_list = line_list + filler
if len(line_list) > 7:
print(line_list)
self.taxDict[line[0]] = line_list
return 1
if format == "mothur":
line = self.taxHeader
line = sep.join(line)
line_formatting(line)
self.taxHeader = ["feature-name", "Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species"]
#for line in f_tax:
# line_formatting(line, name_pos=0, taxonomy_pos=1)
# Silva csv format
if format == "silva":
# Choosing the right database
if database in ["EMBL", "GTDB", "LTP", "RDP", "SILVA"]:
if database == "EMBL":
tax_column = "lca_tax_embl_ebi_ena"
print("Using the EMBL database.")
if database == "GTDB":
tax_column = "lca_tax_gtdb"
print("Using the GTDB database.")
if database == "LTP":
tax_column = "lca_tax_ltp"
print("Using the LTP database.")
if database == "RDP":
tax_column = "lca_tax_rdp"
print("Using the RDP database.")
if database == "SILVA":
tax_column = "lca_tax_slv"
print("Using the SILVA database.")
else:
raise Exception("Wrong database for Silva output was defined. Please select one from : GTDB, RDP, SILVA, "
"LTP, EMBL")
# Correcting redundant set of quotation marks
newHeader = []
for entry in self.taxHeader:
newHeader.append(entry.replace('"', ''))
self.taxHeader = newHeader
# Retrieving sequence identifier name
if 'sequence_identifier' in self.taxHeader:
name_pos = self.taxHeader.index("sequence_identifier")
elif "name" in self.taxHeader:
name_pos = self.taxHeader.index("name")
else:
raise Exception("Could not find the Feature identifier column. Should be either 'sequence_identifier"
" or 'name'. Correct that, please. Thank you.")
database_pos = self.taxHeader.index(tax_column)
#for line in f_tax:
# line_formatting(line, name_pos=name_pos, taxonomy_pos=database_pos)
silva_tax = pd.read_csv(file, sep=sep)
tax_cols = list(silva_tax.columns)
tax_cols.pop(name_pos)
tax_cols.pop(tax_cols.index(tax_column))
silva_tax.drop(tax_cols, axis=1, inplace=True)
import math
for i,row in silva_tax.iterrows():
line_formatting(row)
self.taxHeader = ["feature-id", "Kingdom", "Phylum", "Class", "Order", "Family", "Genus"]#, "Species"]
if format == "dada2":
self.df_tax = pd.read_csv(file)
#name_list = []
#for i,entry in enumerate(df_tax.iloc[:,0]):
# name_list.append("ASV_" + str(i))
columns = list(self.df_tax.columns)
columns[0] = "feature-id"
self.df_tax.columns = columns
# Renaming ASVs for convenience
#df_tax.iloc[:,0] = name_list
self.df_tax.fillna("Unclassified", inplace=True)
# We will re-open the file later in the appropriate function
if format == "Qiime2":
self.qiime2_taxonomy(file, name_pos, sep)
def qiime2_taxonomy(self, file, name_pos, sep):
"""
Loading the taxonomy file in the Qiime2 format and re-formatting it
:param file: Input taxonomy file.
:type file: str
:param name_pos: Position of the name column (column with the taxon codes - hash codes in Qiime2)
:type file: int
:param sep: Separator character used in the taxonomy file.
:type sep: char
:return: taxonomy based dictionary
"""
# Removing confidence column from the tax header.
#self.taxHeader.pop(self.taxHeader.index("Confidence"))
#self.taxHeader.pop(self.taxHeader.index("Taxon"))
self.taxHeader = ["feature-id", "Kingdom", "Phylum", "Class", "Order", "Family", "Genus"]
# Loading tables directly
with open(file, 'r') as f_in:
header = f_in.readline().split(sep)
if "Taxon" in header:
tax_val_index = header.index("Taxon")
else:
raise ValueError("Column 'Taxon' not found in the taxonomy file. This may be due to different version "
"of Qiime2 used.")
# This is to get an idea of distribution of taxonomic lengths (depth) within the tax file. For debugging.
#tax_len_dict = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0, 13:0, 14:0, 15:0}
for line in f_in:
line = line.split('\t')
# Taxon identifier code (name)
key = line[name_pos]
tax_value_raw = line[tax_val_index].split(';')
# Cleaning the names
tax_value = []
for value in tax_value_raw:
tax_value.append(value.split('_')[-1])
# Filling in the empty unidentified levels
# For now the taxonomic depth is hard-coded at 6
tax_len_level = 6
# This is again distribution of taxonomic lengths (depths) within the file. For debugging.
#tax_len_dict[len(tax_value)] = tax_len_dict[len(tax_value)] + 1
# First trimming the lengths
if len(tax_value) > tax_len_level:
tax_value = tax_value[0:tax_len_level]
# Now filling in the empty spaces
if len(tax_value) < tax_len_level:
how_much_shorter = tax_len_level - len(tax_value)
tax_value = tax_value + ["Unidentified"]*how_much_shorter
self.taxDict[key] = tax_value
#import sys
#sys.exit()
def load_feature(self, file, sep, format, featureColumnName, representativeSeqFile, featureFilePickle, threading, matchFeaturesToTaxonomy):
"""
Loading the feature/otu file and creating the final multi indexed table
:param format: Specification of an input format (Qiime2, dada2, custom)
:type format: str
:param file: Input mapping file.
:type file: str
:param sep: Separator character used in the feature/otu file.
:type sep: char
:param representativeSeqFile: Representative sequence file used to generate taxonomy. If taxonomy is generated
externally using DADA2, ASVs should be renamed to match that file exactly.
        :type representativeSeqFile: str
:param featureFilePickle: A flag that determines whether the loaded feature file is a pickle format.
:type featureFilePickle: bool
:param threading: Number of threads to be spawned by the process in moments where it is possible.
:type threading: int
:param matchFeaturesToTaxonomy: Boolean that turns off matching feature index to the taxonomy index. This can
be done to increase speed (default = True)
:type matchFeaturesToTaxonomy: bool
:return: Finalized multi indexed table containing both taxonomy and mapping file information.
"""
# If file ending is .qza, extract table from a biom hdf5 file
if file[-4:] == ".qza":
table_hdf5 = self._load_qiime_zip(file, "feature-table.biom")
with biom_open(table_hdf5) as f:
table_biom = Table.from_hdf5(f)
self.data = table_biom.to_dataframe(dense=True)
elif format == "automatic":
if featureFilePickle == True:
raise ValueError("Automatic feature table parsing unavailable for pickle file. Please specify the format (Qiime2, dada2, custom)")
with open(file, 'r') as f_in:
line = f_in.readline()
if line == "# Constructed from biom file\n":
format = "Qiime2"
if len(line.split(',')[0].replace('"', '')) == 0: # or len(line.split(",")[1]) > 100:
format = "dada2"
else:
format = "custom"
######################################
# Custom format
######################################
if format == "custom":
self.data = pd.read_csv(file, sep=sep)
if featureColumnName:
try:
self.data.index = self.data[featureColumnName]
self.data = self.data.drop([featureColumnName], axis=1)
except:
"Error: featureColumName does not match a colum in an input file"
else:
try:
self.data.index = self.data["#OTU ID"]
self.data = self.data.drop(["#OTU ID"], axis=1)
self.data.index = self.data.index.rename("ID")
except:
try:
self.data.index = self.data["Unnamed: 0"]
self.data = self.data.drop(["Unnamed: 0"], axis=1)
self.data.index = self.data.index.rename("ID")
except:
raise ValueError(
"ID/OTU column not found. Check the OTU name format.")
self.data.columns = self.data.columns.rename(self.mapSampleNames)
'''
# Renaming otus from OTU1 to OTU0001 etc.
new_rownames = []
# Retrieving number of characters in the longest feature name
feature_name_max_len = self.data.index.str.len().max()
# extracting a feature/otu name
name_len = 0
for letter in self.data.index[0]:
if letter.isdigit():
break
name_len = name_len + 1
feature_name = self.data.index[0][0:name_len]
for row in self.data.index:
if len(row) < feature_name_max_len:
# Stands for "Otu..."
row_number = row[name_len:]
zeros = (feature_name_max_len - name_len - len(row_number)) * '0'
new_row = feature_name + zeros + row_number
new_rownames.append(new_row)
self.data.index = new_rownames
for entry in self.taxDict:
if len(entry) < feature_name_max_len:
row = entry[3:]
zeros = (feature_name_max_len - 3 - len(row)) * '0'
row = "Otu" + zeros + row
self.taxDict[row] = self.taxDict.pop(entry)
'''
# Sorting the otu table
#self.data = self.data.sort_index()
######################################
# Qiime2 format
######################################
if format == "Qiime2":
# Skipping the first line of the table and using the second as a header
if featureFilePickle == True:
# Loading a pickle format file
self.data = pd.read_pickle(file)
else:
self.data = pd.read_csv(file, sep=sep, header=1)
self.data.index = self.data["#OTU ID"]
self.data = self.data.drop(["#OTU ID"], axis=1)
self.data.columns = self.data.columns.rename(self.mapSampleNames)
######################################
# DADA2 format
######################################
if format == "dada2":
if featureFilePickle == True:
self.data = pd.read_pickle(file)
else:
self.data = pd.read_csv(file, sep=sep, index_col=0)
self.data = self.data.transpose()
# If we have some representative sequences to check
if representativeSeqFile:
rep_seq_dict = {}
with open(representativeSeqFile, 'r') as f_rep:
for i, line in enumerate(f_rep):
if line[0] == '>':
feature_name = line[1:].rstrip()
sequence = f_rep.__next__()
sequence = sequence.rstrip()
if not sequence.isalpha():
raise ValueError(f"Representative sequence file seems "
f"corrupted on the line: {i}, sequence: {feature_name}.")
else:
# If fasta is spread over several lines, they are merged together.
# Performing a reverse lookup of a key (sequence) belonging to a specific value (seq name)
key = next(key for key, value in rep_seq_dict.items() if value == feature_name)
# Adding the new sequence at the end of the old one, saving the new key and deleting the old one
new_key = key + line.rstrip()
rep_seq_dict[new_key] = rep_seq_dict[key]
del rep_seq_dict[key]
# Saving sequences into the representative sequence dictionary
rep_seq_dict[sequence] = feature_name
new_index = []
for sequence in self.data.index:
try:
new_index.append(rep_seq_dict[sequence])
except:
raise ValueError(f"Sequence '{sequence}' not found in the representative set.")
self.data.index = new_index
print("Representative sequences checked successfuly and features renamed using thir values.")
# Sorting the rows to match the feature table
######################################
# seqDataClass format
######################################
if format == "seqDataClass":
self.data = pd.read_csv(file, sep=sep, index_col=0)
if self.taxDict:
for entry in self.taxDict:
if len(self.taxDict[entry]) != 6:
print(entry)
print("Tax Dictionary entry wrong legnth! It should be 6 and is : {}".format(len(self.taxDict[entry])))
break
# Adding a multi index
self.df_tax = pd.DataFrame.from_dict(self.taxDict, orient='index')
# Inserting column in the first place of the table
self.df_tax.insert(0, "feature-id", self.df_tax.index)
# This is because some taxnomic classifiers add even species level and we don't want that kind of stuff here
if 6 in self.df_tax.columns:
self.df_tax.drop(labels=6, axis=1, inplace=True)
self.df_tax.columns = ["feature-id", "Kingdom", "Phylum", "Class", "Order", "Family", "Genus"]
sorter = list(self.data.index)
sorterIndex = dict(zip(sorter, range(len(sorter))))
self.df_tax["tax_rank"] = self.df_tax["feature-id"].map(sorterIndex)
self.df_tax.sort_values(["tax_rank"], ascending=True, inplace=True)
#del self.df_tax["Rank"]
#df1 = df1.transpose()
if len(sorter) > len(self.df_tax.index):
#TODO: This bit is probably broken at the moment
for entry in self.df_tax.index:
sorter.pop(sorter.index(entry))
raise ValueError("The following features were not present in the taxonomy : {}. Please add them.".format(sorter))
elif len(sorter) < len(self.df_tax.index):
print("Taxonomy has some redundant features. Attempting to prune these.")
for entry in self.df_tax["feature-id"]:
if entry not in sorter:
self.df_tax = self.df_tax[self.df_tax['feature-id'] != entry]
# Sanity check that all features in sorter (feature file) are also in self.df_tax_index (taxonomy file)
#for entry in self.df_tax.index:
if matchFeaturesToTaxonomy:
print("Matching features between feature file and taxonomy file.")
tot_len = len(self.df_tax)
import concurrent.futures
def feature_match(entry_list):
for entry in entry_list:
# if entry is just a number
try:
if int(entry) not in sorter:
raise ValueError("Feature file and taxonomy file features mismatch at {}".format(entry))
except:
if entry not in sorter:
raise ValueError("Feature file and taxonomy file features mismatch at {}".format(entry))
# Printing progress percentage, but only every 10th entry to not overwhelm the jupyter lab
#print("finished entries 50")
return 2500
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
i = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=threading) as executor:
futures = []
for chunk in chunks(self.df_tax["feature-id"], n = 2500):
futures.append(executor.submit(feature_match, entry_list=chunk))
#print(chunk)
for future in concurrent.futures.as_completed(futures):
i = i + future.result()
print(f"\r Progress {100 * (i / tot_len):.2f}%", end="\r", flush=True)
print("finished matching")
for i, entry in enumerate(self.df_tax["feature-id"]):
# if entry is just a number
try:
if int(entry) not in sorter:
raise ValueError("Feature file and taxonomy file features mismatch at {}".format(entry))
except:
if entry not in sorter:
raise ValueError("Feature file and taxonomy file features mismatch at {}".format(entry))
# Printing progress percentage, but only every 10th entry to not overwhelm the jupyter lab
if i%10 == 0:
print(f"\r Progress {100*(i/tot_len):.2f}%", end="\r", flush=True)
print("Features match.")
# Renaming the data frame columns names (but not for DADA2, which doesn't produce taxDict
#if self.taxDict:
# new_columns = self.taxHeader[0:len(self.df_tax.columns)]
# #new_columns.append("tax_rank")
# self.df_tax.columns = new_columns
tax_index = pd.MultiIndex.from_frame(self.df_tax)
# Setting the taxonomy indices
self.data.index = tax_index
# Arranging the values of the Multiindex according to the data frame
#self.df_map = pd.DataFrame.from_dict(self.mapDict)
#self.df_map = self.df_map.transpose()
#self.df_map["sample-id"] = list(self.df_map.index)
# Sorting based on the columns present in the feature table
sorter = list(self.data.columns)
sorterIndex = dict(zip(sorter, range(len(sorter))))
for name in self.df_map[self.mapSampleNames]:
if name not in sorter:
raise ValueError(f" Sample names in mapping file are not matching sample names in feature table! {name}.")
self.df_map["map_rank"] = self.df_map[self.mapSampleNames].map(sorterIndex)
self.df_map.sort_values(["map_rank"], ascending=True, inplace=True)
#del self.df_map["Rank"]
# Creating a data frame from the map ditionary
#df2 = df2.transpose()
# Setting the names of columns
#self.mapHeader.append("sample-id")
#self.mapHeader.append("map_rank")
#self.df_map.columns = self.mapHeader
del self.df_map["map_rank"]
        map_index = pd.MultiIndex.from_frame(self.df_map)
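# Hedged sketch (added here because load_feature is truncated above): attaching a
# MultiIndex built from a metadata frame to the sample axis, which is what the
# mapping-file index is used for. Column and sample names are illustrative only.
def _example_multiindex_columns():
    counts = pd.DataFrame([[5, 7], [1, 0]], index=["ASV_1", "ASV_2"], columns=["S1", "S2"])
    meta = pd.DataFrame({"Sample": ["S1", "S2"], "Treatment": ["ctrl", "dose"]})
    counts.columns = pd.MultiIndex.from_frame(meta)
    return counts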
import os
if not os.path.exists("temp"):
os.mkdir("temp")
def add_pi_obj_func_test():
import os
import pyemu
pst = os.path.join("utils","dewater_pest.pst")
pst = pyemu.optimization.add_pi_obj_func(pst,out_pst_name=os.path.join("temp","dewater_pest.piobj.pst"))
print(pst.prior_information.loc["pi_obj_func","equation"])
#pst._update_control_section()
assert pst.control_data.nprior == 1
def fac2real_test():
import os
import numpy as np
import pyemu
# pp_file = os.path.join("utils","points1.dat")
# factors_file = os.path.join("utils","factors1.dat")
# pyemu.utils.gw_utils.fac2real(pp_file,factors_file,
# out_file=os.path.join("utils","test.ref"))
pp_file = os.path.join("utils", "points2.dat")
factors_file = os.path.join("utils", "factors2.dat")
pyemu.geostats.fac2real(pp_file, factors_file,
out_file=os.path.join("temp", "test.ref"))
arr1 = np.loadtxt(os.path.join("utils","fac2real_points2.ref"))
arr2 = np.loadtxt(os.path.join("temp","test.ref"))
#print(np.nansum(np.abs(arr1-arr2)))
#print(np.nanmax(np.abs(arr1-arr2)))
nmax = np.nanmax(np.abs(arr1-arr2))
assert nmax < 0.01
# import matplotlib.pyplot as plt
# diff = (arr1-arr2)/arr1 * 100.0
# diff[np.isnan(arr1)] = np.nan
# p = plt.imshow(diff,interpolation='n')
# plt.colorbar(p)
# plt.show()
def vario_test():
import numpy as np
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
h = v._h_function(np.array([0.0]))
assert h == contribution
h = v._h_function(np.array([a*1000]))
assert h == 0.0
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
print(v2._h_function(np.array([a])))
def aniso_test():
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
v3 = const(contribution,a,anisotropy=2.0,bearing=0.0)
pt0 = (0,0)
pt1 = (1,0)
assert v.covariance(pt0,pt1) == v2.covariance(pt0,pt1)
pt0 = (0,0)
pt1 = (0,1)
assert v.covariance(pt0,pt1) == v3.covariance(pt0,pt1)
def geostruct_test():
import pyemu
v1 = pyemu.utils.geostats.ExpVario(0.1,2.0)
v2 = pyemu.utils.geostats.GauVario(0.1,2.0)
v3 = pyemu.utils.geostats.SphVario(0.1,2.0)
g = pyemu.utils.geostats.GeoStruct(0.2,[v1,v2,v3])
pt0 = (0,0)
pt1 = (0,0)
print(g.covariance(pt0,pt1))
assert g.covariance(pt0,pt1) == 0.5
pt0 = (0,0)
pt1 = (1.0e+10,0)
assert g.covariance(pt0,pt1) == 0.2
def struct_file_test():
import os
import pyemu
structs = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))
#print(structs[0])
pt0 = (0,0)
pt1 = (0,0)
for s in structs:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
with open(os.path.join("utils","struct_out.dat"),'w') as f:
for s in structs:
s.to_struct_file(f)
structs1 = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct_out.dat"))
for s in structs1:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
def covariance_matrix_test():
import os
import pandas as pd
import pyemu
pts = pd.read_csv(os.path.join("utils","points1.dat"),delim_whitespace=True,
header=None,names=["name","x","y"],usecols=[0,1,2])
struct = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))[0]
struct.variograms[0].covariance_matrix(pts.x,pts.y,names=pts.name)
print(struct.covariance_matrix(pts.x,pts.y,names=pts.name).x)
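# The ppcov tests compare covariance matrices assembled by pyemu against
# matrices written by the PEST utility ppcov.exe; the setup functions only run
# where the Windows binary is available and otherwise exit early.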
def setup_ppcov_simple():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.struct2.out"),'','']
args3 = [pts_file,'0.0',str_file,"struct3",os.path.join("utils","ppcov.struct3.out"),'','']
for args in [args1,args2,args3]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_simple_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
mat1_file = os.path.join("utils","ppcov.struct1.out")
mat2_file = os.path.join("utils","ppcov.struct2.out")
mat3_file = os.path.join("utils","ppcov.struct3.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
ppc_mat3 = pyemu.Cov.from_ascii(mat3_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2,struct3 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
print(struct3)
for mat,struct in zip([ppc_mat1,ppc_mat2,ppc_mat3],[struct1,struct2,struct3]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
print(str_mat.row_names)
delt = mat.x - str_mat.x
assert np.abs(delt).max() < 1.0e-7
def setup_ppcov_complex():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.complex.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.complex.struct2.out"),'','']
for args in [args1,args2]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_complex_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
mat1_file = os.path.join("utils","ppcov.complex.struct1.out")
mat2_file = os.path.join("utils","ppcov.complex.struct2.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
for mat,struct in zip([ppc_mat1,ppc_mat2],[struct1,struct2]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
delt = mat.x - str_mat.x
print(mat.x[:,0])
print(str_mat.x[:,0])
print(np.abs(delt).max())
assert np.abs(delt).max() < 1.0e-7
#break
def pp_to_tpl_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
print(pp_df.columns)
def tpl_to_dataframe_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
df_tpl = pyemu.pp_utils.pp_tpl_to_dataframe(pp_file+".tpl")
assert df_tpl.shape[0] == pp_df.shape[0]
# def to_mps_test():
# import os
# import pyemu
# jco_file = os.path.join("utils","dewater_pest.jcb")
# jco = pyemu.Jco.from_binary(jco_file)
# #print(jco.x)
# pst = pyemu.Pst(jco_file.replace(".jcb",".pst"))
# #print(pst.nnz_obs_names)
# oc_dict = {oc:"l" for oc in pst.nnz_obs_names}
# obj_func = {name:1.0 for name in pst.par_names}
#
# #pyemu.optimization.to_mps(jco=jco_file)
# #pyemu.optimization.to_mps(jco=jco_file,obs_constraint_sense=oc_dict)
# #pyemu.optimization.to_mps(jco=jco_file,obj_func="h00_00")
# decision_var_names = pst.parameter_data.loc[pst.parameter_data.pargp=="q","parnme"].tolist()
# pyemu.optimization.to_mps(jco=jco_file,obj_func=obj_func,decision_var_names=decision_var_names,
# risk=0.975)
def setup_pp_test():
import os
import pyemu
try:
import flopy
except:
return
model_ws = os.path.join("..","examples","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
pp_dir = os.path.join("utils")
#ml.export(os.path.join("temp","test_unrot_grid.shp"))
sr = pyemu.helpers.SpatialReference().from_namfile(
os.path.join(ml.model_ws, ml.namefile),
delc=ml.dis.delc, delr=ml.dis.delr)
sr.rotation = 0.
par_info_unrot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr, prefix_dict={0: "hk1",1:"hk2"},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
#print(par_info_unrot.parnme.value_counts())
gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(a=1000,contribution=1.0))
ok = pyemu.geostats.OrdinaryKrige(gs,par_info_unrot)
ok.calc_factors_grid(sr)
sr2 = pyemu.helpers.SpatialReference.from_gridspec(
os.path.join(ml.model_ws, "test.spc"), lenuni=2)
par_info_drot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr2, prefix_dict={0: ["hk1_", "sy1_", "rch_"]},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr2)
par_info_mrot = pyemu.pp_utils.setup_pilotpoints_grid(ml,prefix_dict={0:["hk1_","sy1_","rch_"]},
every_n_cell=2,pp_dir=pp_dir,tpl_dir=pp_dir,
shapename=os.path.join("temp","test_unrot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(ml.sr)
sr.rotation = 15
#ml.export(os.path.join("temp","test_rot_grid.shp"))
#pyemu.gw_utils.setup_pilotpoints_grid(ml)
par_info_rot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr,every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_rot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr)
print(par_info_unrot.x)
print(par_info_drot.x)
print(par_info_mrot.x)
print(par_info_rot.x)
def read_hob_test():
import os
import pyemu
hob_file = os.path.join("utils","HOB.txt")
df = pyemu.gw_utils.modflow_hob_to_instruction_file(hob_file)
print(df.obsnme)
def read_pval_test():
import os
import pyemu
pval_file = os.path.join("utils", "meras_trEnhance.pval")
pyemu.gw_utils.modflow_pval_to_template_file(pval_file)
def pp_to_shapefile_test():
import os
import pyemu
try:
import shapefile
except:
print("no pyshp")
return
pp_file = os.path.join("utils","points1.dat")
shp_file = os.path.join("temp","points1.dat.shp")
pyemu.pp_utils.write_pp_shapfile(pp_file)
def write_tpl_test():
import os
import pyemu
tpl_file = os.path.join("utils","test_write.tpl")
in_file = os.path.join("temp","tpl_test.dat")
par_vals = {"q{0}".format(i+1):12345678.90123456 for i in range(7)}
pyemu.pst_utils.write_to_template(par_vals,tpl_file,in_file)
def read_pestpp_runstorage_file_test():
import os
import pyemu
rnj_file = os.path.join("utils","freyberg.rnj")
#rnj_file = os.path.join("..", "..", "verification", "10par_xsec", "master_opt1","pest.rnj")
p1,o1 = pyemu.helpers.read_pestpp_runstorage(rnj_file)
p2,o2 = pyemu.helpers.read_pestpp_runstorage(rnj_file,9)
diff = p1 - p2
diff.sort_values("parval1",inplace=True)
def smp_to_ins_test():
import os
import pyemu
smp = os.path.join("utils","TWDB_wells.smp")
ins = os.path.join('temp',"test.ins")
try:
pyemu.pst_utils.smp_to_ins(smp,ins)
except:
pass
else:
raise Exception("should have failed")
pyemu.smp_utils.smp_to_ins(smp,ins,True)
def master_and_workers():
import shutil
import pyemu
worker_dir = os.path.join("..","verification","10par_xsec","template_mac")
master_dir = os.path.join("temp","master")
if not os.path.exists(master_dir):
os.mkdir(master_dir)
assert os.path.exists(worker_dir)
pyemu.helpers.start_workers(worker_dir,"pestpp","pest.pst",1,
worker_root="temp",master_dir=master_dir)
#now try it from within the master dir
base_cwd = os.getcwd()
os.chdir(master_dir)
pyemu.helpers.start_workers(os.path.join("..","..",worker_dir),
"pestpp","pest.pst",3,
master_dir='.')
os.chdir(base_cwd)
def first_order_pearson_regul_test():
import os
from pyemu import Schur
from pyemu.utils.helpers import first_order_pearson_tikhonov,zero_order_tikhonov
w_dir = "la"
sc = Schur(jco=os.path.join(w_dir,"pest.jcb"))
pt = sc.posterior_parameter
zero_order_tikhonov(sc.pst)
first_order_pearson_tikhonov(sc.pst,pt,reset=False)
print(sc.pst.prior_information)
sc.pst.rectify_pi()
assert sc.pst.control_data.pestmode == "regularization"
sc.pst.write(os.path.join('temp','test.pst'))
def zero_order_regul_test():
import os
import pyemu
pst = pyemu.Pst(os.path.join("pst","inctest.pst"))
pyemu.helpers.zero_order_tikhonov(pst)
print(pst.prior_information)
assert pst.control_data.pestmode == "regularization"
pst.write(os.path.join('temp','test.pst'))
pyemu.helpers.zero_order_tikhonov(pst,reset=False)
assert pst.prior_information.shape[0] == pst.npar_adj * 2
def kl_test():
import os
import numpy as np
import pandas as pd
import pyemu
import matplotlib.pyplot as plt
try:
import flopy
except:
print("flopy not imported...")
return
model_ws = os.path.join("..","verification","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
str_file = os.path.join("..","verification","Freyberg","structure.dat")
arr_tru = np.loadtxt(os.path.join("..","verification",
"Freyberg","extra_crispy",
"hk.truth.ref")) + 20
basis_file = os.path.join("utils","basis.jco")
tpl_file = os.path.join("utils","test.tpl")
factors_file = os.path.join("temp","factors.dat")
num_eig = 100
prefixes = ["hk1"]
df = pyemu.utils.helpers.kl_setup(num_eig=num_eig, sr=ml.sr,
struct=str_file,
factors_file=factors_file,
basis_file=basis_file,
prefixes=prefixes,islog=False)
basis = pyemu.Matrix.from_binary(basis_file)
basis = basis[:,:num_eig]
arr_tru = np.atleast_2d(arr_tru.flatten()).transpose()
proj = np.dot(basis.T.x,arr_tru)[:num_eig]
#proj.autoalign = False
back = np.dot(basis.x, proj)
back = back.reshape(ml.nrow,ml.ncol)
df.parval1 = proj
arr = pyemu.geostats.fac2real(df,factors_file,out_file=None)
fig = plt.figure(figsize=(10, 10))
ax1, ax2 = plt.subplot(121),plt.subplot(122)
mn,mx = arr_tru.min(),arr_tru.max()
print(arr.max(), arr.min())
print(back.max(),back.min())
diff = np.abs(back - arr)
print(diff.max())
assert diff.max() < 1.0e-5
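# Ordinary kriging tests: an interpolation point that coincides with a pilot
# point should receive a single factor equal to 1.0, and the factors for any
# interpolation point should sum to 1.0.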
def ok_test():
import os
import pandas as pd
import pyemu
str_file = os.path.join("utils","struct_test.dat")
pts_data = pd.DataFrame({"x":[1.0,2.0,3.0],"y":[0.,0.,0.],"name":["p1","p2","p3"]})
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
interp_points = pts_data.copy()
kf = ok.calc_factors(interp_points.x,interp_points.y)
#for ptname in pts_data.name:
for i in kf.index:
assert len(kf.loc[i,"inames"])== 1
assert kf.loc[i,"ifacts"][0] == 1.0
assert sum(kf.loc[i,"ifacts"]) == 1.0
print(kf)
def ok_grid_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
kf = ok.calc_factors_grid(sr,verbose=False,var_filename=os.path.join("temp","test_var.ref"),minpts_interp=1)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ok_grid_zone_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
pts_data.loc[:,"zone"] = 1
pts_data.zone.iloc[1] = 2
print(pts_data.zone.unique())
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
zone_array = np.ones((nrow,ncol))
zone_array[0,0] = 2
kf = ok.calc_factors_grid(sr,verbose=False,
var_filename=os.path.join("temp","test_var.ref"),
minpts_interp=1,zone_array=zone_array)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ppk2fac_verf_test():
import os
import numpy as np
import pyemu
try:
import flopy
except:
return
ws = os.path.join("..","verification","Freyberg")
gspc_file = os.path.join(ws,"grid.spc")
pp_file = os.path.join(ws,"pp_00_pp.dat")
str_file = os.path.join(ws,"structure.complex.dat")
ppk2fac_facfile = os.path.join(ws,"ppk2fac_fac.dat")
pyemu_facfile = os.path.join("temp","pyemu_facfile.dat")
sr = flopy.utils.SpatialReference.from_gridspec(gspc_file)
ok = pyemu.utils.OrdinaryKrige(str_file,pp_file)
ok.calc_factors_grid(sr,maxpts_interp=10)
ok.to_grid_factors_file(pyemu_facfile)
zone_arr = np.loadtxt(os.path.join(ws,"extra_crispy","ref","ibound.ref"))
pyemu_arr = pyemu.utils.fac2real(pp_file,pyemu_facfile,out_file=None)
ppk2fac_arr = pyemu.utils.fac2real(pp_file,ppk2fac_facfile,out_file=None)
pyemu_arr[zone_arr == 0] = np.NaN
pyemu_arr[zone_arr == -1] = np.NaN
ppk2fac_arr[zone_arr == 0] = np.NaN
ppk2fac_arr[zone_arr == -1] = np.NaN
diff = np.abs(pyemu_arr - ppk2fac_arr)
print(diff)
assert np.nansum(diff) < 1.0e-6,np.nansum(diff)
# def opt_obs_worth():
# import os
# import pyemu
# wdir = os.path.join("utils")
# os.chdir(wdir)
# pst = pyemu.Pst(os.path.join("supply2_pest.fosm.pst"))
# zero_weight_names = [n for n,w in zip(pst.observation_data.obsnme,pst.observation_data.weight) if w == 0.0]
# #print(zero_weight_names)
# #for attr in ["base_jacobian","hotstart_resfile"]:
# # pst.pestpp_options[attr] = os.path.join(wdir,pst.pestpp_options[attr])
# #pst.template_files = [os.path.join(wdir,f) for f in pst.template_files]
# #pst.instruction_files = [os.path.join(wdir,f) for f in pst.instruction_files]
# #print(pst.template_files)
# df = pyemu.optimization.get_added_obs_importance(pst,obslist_dict={"zeros":zero_weight_names})
# os.chdir("..")
# print(df)
def mflist_budget_test():
import pyemu
import os
import pandas as pd
try:
import flopy
except:
print("no flopy...")
return
model_ws = os.path.join("..","examples","Freyberg_transient")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False,load_only=[])
list_filename = os.path.join(model_ws,"freyberg.list")
assert os.path.exists(list_filename)
df = pyemu.gw_utils.setup_mflist_budget_obs(list_filename,start_datetime=ml.start_datetime)
print(df)
times = df.loc[df.index.str.startswith('vol_wells')].index.str.split(
'_', expand=True).get_level_values(2)[::100]
times = pd.to_datetime(times, yearfirst=True)
df = pyemu.gw_utils.setup_mflist_budget_obs(
list_filename, start_datetime=ml.start_datetime, specify_times=times)
flx, vol = pyemu.gw_utils.apply_mflist_budget_obs(
list_filename, 'flux.dat', 'vol.dat', start_datetime=ml.start_datetime,
times='budget_times.config'
)
assert (flx.index == vol.index).all()
assert (flx.index == times).all()
def mtlist_budget_test():
import pyemu
import pandas as pd
import os
try:
import flopy
except:
print("no flopy...")
return
list_filename = os.path.join("utils","mt3d.list")
assert os.path.exists(list_filename)
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970')
assert len(ins_files) == 2
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970', gw_prefix='')
assert len(ins_files) == 2
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime=None)
assert len(ins_files) == 2
list_filename = os.path.join("utils", "mt3d_imm_sor.lst")
assert os.path.exists(list_filename)
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime='1-1-1970')
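# Prior-building tests: geostatistical_prior_builder and geostatistical_draws
# construct a prior covariance matrix (or ensemble) from a geostatistical
# structure plus pilot-point information, and must honor tied parameters so
# that the result matches the number of adjustable parameters.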
def geostat_prior_builder_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
# print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{str_file:tpl_file})
d1 = np.diag(cov.x)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{gs:df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
d2 = np.diag(cov.x)
assert np.array_equiv(d1, d2)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
cov = pyemu.helpers.geostatistical_prior_builder(pst, {gs: df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
cov = pyemu.helpers.geostatistical_prior_builder(pst, {str_file: tpl_file})
assert cov.shape[0] == pst.npar_adj
def geostat_draws_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
pe = pyemu.helpers.geostatistical_draws(pst_file,{str_file:tpl_file})
assert (pe.shape == pe.dropna().shape)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
pe = pyemu.helpers.geostatistical_draws(pst_file,{gs:df},
sigma_range=4)
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
pst.parameter_data.loc[pst.par_names[1:10],"partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
# def linearuniversal_krige_test():
# try:
# import flopy
# except:
# return
#
# import numpy as np
# import pandas as pd
# import pyemu
# nrow,ncol = 10,5
# delr = np.ones((ncol)) * 1.0/float(ncol)
# delc = np.ones((nrow)) * 1.0/float(nrow)
#
# num_pts = 0
# ptx = np.random.random(num_pts)
# pty = np.random.random(num_pts)
# ptname = ["p{0}".format(i) for i in range(num_pts)]
# pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
# pts_data.index = pts_data.name
# pts_data = pts_data.loc[:,["x","y","name"]]
#
#
# sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
# pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
# pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
# pts_data.loc["i0j0","value"] = 1.0
# pts_data.loc["imxjmx","value"] = 0.0
#
# str_file = os.path.join("utils","struct_test.dat")
# gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
# luk = pyemu.utils.geostats.LinearUniversalKrige(gs,pts_data)
# df = luk.estimate_grid(sr,verbose=True,
# var_filename=os.path.join("utils","test_var.ref"),
# minpts_interp=1)
def gslib_2_dataframe_test():
import os
import pyemu
gslib_file = os.path.join("utils","ch91pt.shp.gslib")
df = pyemu.geostats.gslib_2_dataframe(gslib_file)
print(df)
def sgems_to_geostruct_test():
import os
import pyemu
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
def load_sgems_expvar_test():
import os
import numpy as np
#import matplotlib.pyplot as plt
import pyemu
dfs = pyemu.geostats.load_sgems_exp_var(os.path.join("utils","ch00_expvar"))
xmn,xmx = 1.0e+10,-1.0e+10
for d,df in dfs.items():
xmn = min(xmn,df.x.min())
xmx = max(xmx,df.x.max())
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
v = gs.variograms[0]
#ax = gs.plot(ls="--")
#plt.show()
#x = np.linspace(xmn,xmx,100)
#y = v.inv_h(x)
#
#plt.plot(x,y)
#plt.show()
def read_hydmod_test():
import os
import numpy as np
import pandas as pd
import pyemu
try:
import flopy
except:
return
df, outfile = pyemu.gw_utils.modflow_read_hydmod_file(os.path.join('utils','freyberg.hyd.bin'),
os.path.join('temp','freyberg.hyd.bin.dat'))
df = pd.read_csv(os.path.join('temp', 'freyberg.hyd.bin.dat'), delim_whitespace=True)
dftrue = pd.read_csv(os.path.join('utils', 'freyberg.hyd.bin.dat.true'), delim_whitespace=True)
assert np.allclose(df.obsval.values, dftrue.obsval.values)
def make_hydmod_insfile_test():
import os
import shutil
import pyemu
try:
import flopy
except:
return
shutil.copy2(os.path.join('utils','freyberg.hyd.bin'),os.path.join('temp','freyberg.hyd.bin'))
pyemu.gw_utils.modflow_hydmod_to_instruction_file(os.path.join('temp','freyberg.hyd.bin'))
#assert open(os.path.join('utils','freyberg.hyd.bin.dat.ins'),'r').read() == open('freyberg.hyd.dat.ins', 'r').read()
assert os.path.exists(os.path.join('temp','freyberg.hyd.bin.dat.ins'))
def plot_summary_test():
import os
import pandas as pd
import pyemu
try:
import matplotlib.pyplot as plt
except:
return
par_df = pd.read_csv(os.path.join("utils","freyberg_pp.par.usum.csv"),
index_col=0)
idx = list(par_df.index.map(lambda x: x.startswith("HK")))
par_df = par_df.loc[idx,:]
ax = pyemu.plot_utils.plot_summary_distributions(par_df,label_post=True)
plt.savefig(os.path.join("temp","hk_par.png"))
plt.close()
df = os.path.join("utils","freyberg_pp.pred.usum.csv")
figs,axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
#plt.show()
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_pred_{0}.png".format(i)))
plt.close(fig)
df = os.path.join("utils","freyberg_pp.par.usum.csv")
figs, axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_par_{0}.png".format(i)))
plt.close(fig)
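# MODFLOW binary-output tests: setup_hds_timeseries/setup_hds_obs create PEST
# observations from head, concentration (UCN) and cell-by-cell budget files,
# and the matching apply_* functions must reproduce the same observation
# values when re-run on the binary files.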
def hds_timeseries_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
model_ws =os.path.join("..","examples","Freyberg_transient")
org_hds_file = os.path.join(model_ws, "freyberg.hds")
hds_file = os.path.join("temp", "freyberg.hds")
org_cbc_file = org_hds_file.replace(".hds",".cbc")
cbc_file = hds_file.replace(".hds", ".cbc")
shutil.copy2(org_hds_file, hds_file)
shutil.copy2(org_cbc_file, cbc_file)
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, check=False)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "test": (0, 10, 14)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
# m.change_model_ws("temp",reset_external=True)
# m.write_input()
# pyemu.os_utils.run("mfnwt freyberg.nam",cwd="temp")
cmd, df1 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, include_path=True, prefix="stor",
text="storage", fill=0.0)
cmd,df2 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="stor",
text="storage",fill=0.0)
print(df1)
d = np.abs(df1.obsval.values - df2.obsval.values)
print(d.max())
assert d.max() == 0.0,d
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="constant head")
except:
pass
else:
raise Exception("should have failed")
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="JUNK")
except:
pass
else:
raise Exception("should have failed")
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True,prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,load_only=[],check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict,model=m,include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True,prefix="hds")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True, prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds")
# df1 = pd.read_csv(out_file, delim_whitespace=True)
# pyemu.gw_utils.apply_hds_obs(hds_file)
# df2 = pd.read_csv(out_file, delim_whitespace=True)
# diff = df1.obsval - df2.obsval
def grid_obs_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
m_ws = os.path.join("..", "examples", "freyberg_sfr_update")
org_hds_file = os.path.join("..","examples","Freyberg_Truth","freyberg.hds")
org_multlay_hds_file = os.path.join(m_ws, "freyberg.hds") # 3 layer version
org_ucn_file = os.path.join(m_ws, "MT3D001.UCN") # mt example
hds_file = os.path.join("temp","freyberg.hds")
multlay_hds_file = os.path.join("temp", "freyberg_3lay.hds")
ucn_file = os.path.join("temp", "MT3D001.UCN")
out_file = hds_file+".dat"
multlay_out_file = multlay_hds_file+".dat"
ucn_out_file = ucn_file+".dat"
shutil.copy2(org_hds_file,hds_file)
shutil.copy2(org_multlay_hds_file, multlay_hds_file)
shutil.copy2(org_ucn_file, ucn_file)
pyemu.gw_utils.setup_hds_obs(hds_file)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert abs(diff.max()) < 1.0e-6, abs(diff.max())
pyemu.gw_utils.setup_hds_obs(multlay_hds_file)
df1 = pd.read_csv(multlay_out_file,delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval,df2.obsval), abs(diff.max())
pyemu.gw_utils.setup_hds_obs(hds_file,skip=-999)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
pyemu.gw_utils.setup_hds_obs(ucn_file, skip=1.e30, prefix='ucn')
df1 = pd.read_csv(ucn_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(ucn_file)
df2 = pd.read_csv(ucn_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
# skip = lambda x : x < -888.0
skip = lambda x: x if x > -888.0 else np.NaN
pyemu.gw_utils.setup_hds_obs(hds_file,skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = (0,0)
pyemu.gw_utils.setup_hds_obs(hds_file,kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
    df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
        assert result[0] == Timestamp('1970-01-01')
# The script generates the Dataframes with the VC, PBLH, u_mean.
import BoundaryLayerToolbox as blt
import pandas as pd
import numpy as np
import h5py
import os
import shutil
path2wrf = '/Volumes/BUFFALO_SOLDIER/datos_VC/'
path2DataFrames = "../datos/dataframes_VC/"
path2pollutants = "../datos/contaminantes/2015/"
months = {'jan': '01',
'feb': '02',
'mar': '03',
'apr': '04',
'may': '05',
'jun': '06',
'jul': '07',
'aug': '08',
'sep': '09',
'oct': '10',
'nov': '11',
'dic': '12'}
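# Return the number of days in the given month as a string; February is
# treated as 28 days (leap years are not handled here).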
def E1or30(month):
if month in ['jan', 'mar', 'may', 'jul', 'aug', 'oct', 'dic']:
return '31'
elif month in ['apr', 'jun', 'sep', 'jul', 'nov']:
return '30'
elif month == 'feb':
return '28'
location = ['MER', 'PED', 'SAG', 'TLA', 'UIZ', 'SFE']
stations = pd.read_csv('../datos/Stations_Info.csv', index_col=0)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make dataset for the End-to-End model (CSJ corpus).
Note that feature extraction depends on transcripts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import sys
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
import pickle
sys.path.append('../')
from csj.path import Path
from csj.input_data import read_audio
from csj.labels.transcript import read_sdb
from utils.util import mkdir_join
from utils.inputs.wav_split import split_wav
from utils.dataset import add_element
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help='path to CSJ dataset')
parser.add_argument('--dataset_save_path', type=str,
help='path to save dataset')
parser.add_argument('--feature_save_path', type=str,
help='path to save input features')
parser.add_argument('--wav_save_path', type=str,
help='path to save wav files (per utterance)')
parser.add_argument('--tool', type=str,
choices=['htk', 'python_speech_features', 'librosa'])
parser.add_argument('--htk_save_path', type=str, help='path to save features')
parser.add_argument('--normalize', type=str,
choices=['global', 'speaker', 'utterance', 'no'])
parser.add_argument('--save_format', type=str, choices=['numpy', 'htk', 'wav'])
parser.add_argument('--feature_type', type=str, choices=['fbank', 'mfcc'])
parser.add_argument('--channels', type=int,
help='the number of frequency channels')
parser.add_argument('--window', type=float,
help='window width to extract features')
parser.add_argument('--slide', type=float, help='extract features per slide')
parser.add_argument('--energy', type=int, help='if 1, add the energy feature')
parser.add_argument('--delta', type=int, help='if 1, add the energy feature')
parser.add_argument('--deltadelta', type=int,
help='if 1, double delta features are also extracted')
parser.add_argument('--subset', type=int,
help='If True, create small dataset.')
parser.add_argument('--fullset', type=int,
help='If True, create full-size dataset.')
args = parser.parse_args()
path = Path(data_path=args.data_path,
config_path='./config',
htk_save_path=args.htk_save_path)
CONFIG = {
'feature_type': args.feature_type,
'channels': args.channels,
'sampling_rate': 16000,
'window': args.window,
'slide': args.slide,
'energy': bool(args.energy),
'delta': bool(args.delta),
'deltadelta': bool(args.deltadelta)
}
if args.save_format == 'htk':
assert args.tool == 'htk'
def main(data_size):
speaker_dict_dict = {} # dict of speaker_dict
for data_type in ['train', 'eval1', 'eval2', 'eval3']:
print('=' * 50)
print(' ' * 20 + data_type + ' (' + data_size + ')' + ' ' * 20)
print('=' * 50)
########################################
# labels
########################################
if data_type == 'train':
label_paths = path.trans(data_type='train_' + data_size)
else:
label_paths = path.trans(data_type=data_type)
save_vocab_file = True if data_type == 'train' else False
is_test = True if 'eval' in data_type else False
print('=> Processing transcripts...')
speaker_dict_dict[data_type] = read_sdb(
label_paths=label_paths,
data_size=data_size,
vocab_file_save_path=mkdir_join('./config', 'vocab_files'),
save_vocab_file=save_vocab_file,
is_test=is_test,
data_type=data_type)
########################################
# inputs
########################################
print('\n=> Processing input data...')
input_save_path = mkdir_join(
args.feature_save_path, args.save_format, data_size)
if isfile(join(input_save_path, data_type, 'complete.txt')):
print('Already exists.')
else:
if args.save_format == 'wav':
########################################
# Split WAV files per utterance
########################################
if data_type == 'train':
wav_paths = path.wav(corpus='train' + data_size)
else:
wav_paths = path.wav(corpus=data_type)
split_wav(wav_paths=wav_paths,
speaker_dict=speaker_dict_dict[data_type],
save_path=mkdir_join(input_save_path, data_type))
# NOTE: ex.) save_path:
# csj/feature/save_format/data_size/data_type/speaker/utt_name.npy
elif args.save_format in ['numpy', 'htk']:
if data_type == 'train':
if args.tool == 'htk':
audio_paths = path.htk(data_type='train_' + data_size)
else:
audio_paths = path.wav(data_type='train_' + data_size)
is_training = True
global_mean_male, global_std_male, global_mean_female, global_std_female = None, None, None, None
else:
if args.tool == 'htk':
audio_paths = path.htk(data_type=data_type)
else:
audio_paths = path.wav(data_type=data_type)
is_training = False
# Load statistics over train dataset
global_mean_male = np.load(
join(input_save_path, 'train/global_mean_male.npy'))
global_std_male = np.load(
join(input_save_path, 'train/global_std_male.npy'))
global_mean_female = np.load(
join(input_save_path, 'train/global_mean_female.npy'))
global_std_female = np.load(
join(input_save_path, 'train/global_std_female.npy'))
read_audio(audio_paths=audio_paths,
speaker_dict=speaker_dict_dict[data_type],
tool=args.tool,
config=CONFIG,
normalize=args.normalize,
is_training=is_training,
save_path=mkdir_join(input_save_path, data_type),
save_format=args.save_format,
global_mean_male=global_mean_male,
global_std_male=global_std_male,
global_mean_female=global_mean_female,
global_std_female=global_std_female)
# NOTE: ex.) save_path:
# csj/feature/save_format/data_size/data_type/speaker/*.npy
# Make a confirmation file to prove that dataset was saved
# correctly
with open(join(input_save_path, data_type, 'complete.txt'), 'w') as f:
f.write('')
########################################
# dataset (csv)
########################################
print('\n=> Saving dataset files...')
dataset_save_path = mkdir_join(
args.dataset_save_path, args.save_format, data_size, data_type)
df_columns = ['frame_num', 'input_path', 'transcript']
df_kanji = pd.DataFrame([], columns=df_columns)
df_kanji_divide = pd.DataFrame([], columns=df_columns)
df_kana = pd.DataFrame([], columns=df_columns)
df_kana_divide = pd.DataFrame([], columns=df_columns)
df_phone = pd.DataFrame([], columns=df_columns)
df_phone_divide = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
with open(join(input_save_path, data_type, 'frame_num.pickle'), 'rb') as f:
frame_num_dict = pickle.load(f)
utt_count = 0
df_kanji_list, df_kanji_divide_list = [], []
df_kana_list, df_kana_divide_list = [], []
df_phone_list, df_phone_divide_list = [], []
df_word_freq1_list, df_word_freq5_list = [], []
df_word_freq10_list, df_word_freq15_list = [], []
speaker_dict = speaker_dict_dict[data_type]
for speaker, utt_dict in tqdm(speaker_dict.items()):
for utt_index, utt_info in utt_dict.items():
kanji_indices, kanji_divide_indices = utt_info[2:4]
kana_indices, kana_divide_indices = utt_info[4:6]
phone_indices, phone_divide_indices = utt_info[6:8]
word_freq1_indices, word_freq5_indices = utt_info[8:10]
word_freq10_indices, word_freq15_indices = utt_info[10:12]
if args.save_format == 'numpy':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.npy')
elif args.save_format == 'htk':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.htk')
elif args.save_format == 'wav':
input_utt_save_path = path.utt2wav(utt_index)
else:
raise ValueError('save_format is numpy or htk or wav.')
assert isfile(input_utt_save_path)
frame_num = frame_num_dict[speaker + '_' + utt_index]
df_kanji = add_element(
df_kanji, [frame_num, input_utt_save_path, kanji_indices])
df_kanji_divide = add_element(
df_kanji_divide, [frame_num, input_utt_save_path, kanji_divide_indices])
df_kana = add_element(
df_kana, [frame_num, input_utt_save_path, kana_indices])
df_kana_divide = add_element(
df_kana_divide, [frame_num, input_utt_save_path, kana_divide_indices])
df_phone = add_element(
df_phone, [frame_num, input_utt_save_path, phone_indices])
df_phone_divide = add_element(
df_phone_divide, [frame_num, input_utt_save_path, phone_divide_indices])
df_word_freq1 = add_element(
df_word_freq1, [frame_num, input_utt_save_path, word_freq1_indices])
df_word_freq5 = add_element(
df_word_freq5, [frame_num, input_utt_save_path, word_freq5_indices])
df_word_freq10 = add_element(
df_word_freq10, [frame_num, input_utt_save_path, word_freq10_indices])
df_word_freq15 = add_element(
df_word_freq15, [frame_num, input_utt_save_path, word_freq15_indices])
utt_count += 1
# Reset
if utt_count == 10000:
df_kanji_list.append(df_kanji)
df_kanji_divide_list.append(df_kanji_divide)
df_kana_list.append(df_kana)
df_kana_divide_list.append(df_kana_divide)
df_phone_list.append(df_phone)
df_phone_divide_list.append(df_phone_divide)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
df_kanji = pd.DataFrame([], columns=df_columns)
df_kanji_divide = pd.DataFrame([], columns=df_columns)
df_kana = pd.DataFrame([], columns=df_columns)
df_kana_divide = pd.DataFrame([], columns=df_columns)
df_phone = pd.DataFrame([], columns=df_columns)
df_phone_divide = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
utt_count = 0
# Last dataframe
df_kanji_list.append(df_kanji)
df_kanji_divide_list.append(df_kanji_divide)
df_kana_list.append(df_kana)
df_kana_divide_list.append(df_kana_divide)
df_phone_list.append(df_phone)
df_phone_divide_list.append(df_phone_divide)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
# Concatenate all dataframes
df_kanji = df_kanji_list[0]
df_kanji_divide = df_kanji_divide_list[0]
df_kana = df_kana_list[0]
df_kana_divide = df_kana_divide_list[0]
df_phone = df_phone_list[0]
df_phone_divide = df_phone_divide_list[0]
df_word_freq1 = df_word_freq1_list[0]
df_word_freq5 = df_word_freq5_list[0]
df_word_freq10 = df_word_freq10_list[0]
df_word_freq15 = df_word_freq15_list[0]
for df_i in df_kanji_list[1:]:
df_kanji = pd.concat([df_kanji, df_i], axis=0)
for df_i in df_kanji_divide_list[1:]:
df_kanji_divide = pd.concat([df_kanji_divide, df_i], axis=0)
for df_i in df_kana_list[1:]:
df_kana = pd.concat([df_kana, df_i], axis=0)
for df_i in df_kana_divide_list[1:]:
df_kana_divide = pd.concat([df_kana_divide, df_i], axis=0)
for df_i in df_phone_list[1:]:
df_phone = | pd.concat([df_phone, df_i], axis=0) | pandas.concat |
'''
Spectrum processing for the determination of thickness and optical constants program in python3, ver 1.0.2.1.
<NAME>. published under MIT license.
Peak detection program inspired by <NAME>, https://github.com/demotu/BMC under MIT license.
For more information, please visit https://github.com/hitmesstech/spop
'''
#%%
import numpy as np
import scipy
import pandas as pandas
from scipy import interpolate
import matplotlib.pyplot as plt
import tkinter as tk
jl='tmestte'
jr='CSV表格文件'
jt="*.csv"
from tkinter import filedialog
js='am b'+'yhi'
from tkinter import simpledialog
root = tk.Tk()
jl=jl+'ch@github'
from tkinter import messagebox
root.withdraw()
#messagebox.showinfo('Spectrum Test Program b'+'yTeR'+'enL'+'iu', 'showinfo');
#win32ui.MessageBox('请在命令行窗口中输入衬底折射率')
#print("Please Input substrates' index of refraction:");
#m=float(input());
#m=4.3;
#import tkFileDialog as tkFileDialog
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
dx = x[1:] - x[:-1]
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
if ind.size and indnan.size:
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1]
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0
ind = np.sort(ind[~idel])
return ind
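# Fit a univariate smoothing spline through the spectrum values at the
# selected extrema indices r; used below to build the Tmax/Tmin envelopes.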
def tliu_spline(x0,y0,r):
return interpolate.UnivariateSpline(x0[r],y0[r])
#%%
js='Spectrum Progr'+js;
m=simpledialog.askfloat(js+jl, '请输入衬底折射率');
#file_path = filedialog.askopenfilename()
s=filedialog.askopenfilename(filetypes=( (jr, jt),("All files", "*.*")))
#dlg = win32ui.CreateFileDialog(bFileOpen=1,filter='CSV文本表格文件(*.csv)|*.csv|',flags=win32con.OFN_HIDEREADONLY)
#dlg.DoModal();
#s=dlg.GetPathName();
#print(s);
spec=pandas.read_table(s,engine='python',sep='[^0-9\.]+',skiprows=1);
#tck = interpolate.splrep(spec.iloc[:,0].T.values[peaks],spec.iloc[:,1].T.values[peaks],5)
#SplineMin = interpolate.splev(x, tck, der=0)
#SplineMin=interpolate.spline(spec.iloc[:,0].T.values[peaks],spec.iloc[:,1].T.values[peaks],x,order=1);
#SplineMax=scipy.spline(spec[0][valleys],spec[1][valleys],x,order=1);
peaks=detect_peaks(spec.iloc[:,1].T.values,mpd=10);
valleys=detect_peaks(spec.iloc[:,1].T.values,mpd=10,valley=True);
x=spec.iloc[:,0].T.values;
peakspline=tliu_spline(spec.iloc[:,0].T.values,spec.iloc[:,1].T.values,peaks)(spec.iloc[:,0].T.values);
valleyspline=tliu_spline(spec.iloc[:,0].T.values,spec.iloc[:,1].T.values,valleys)(spec.iloc[:,0].T.values);
Mr=2*m*(peakspline-valleyspline)/(peakspline*valleyspline)+(m*m+1)/2;
nsample=np.sqrt( Mr+np.sqrt(Mr*Mr-m*m) );
#plt.figure(1);
plt.subplot(311);
plt.plot(x,spec.iloc[:,1].T.values,label='spectrum(a.u)');
plt.plot(x,peakspline,label='Tmax spline');
plt.plot(x,valleyspline,label='Tmin spline');
plt.legend();
#plt.draw();
#plt.figure(2)
plt.subplot(312);
plt.plot(x,nsample,label='n($\lambda$)(a.u.)');
plt.legend();
#plt.draw();
#d=float(input());
E=8*nsample*nsample*m/np.array(peakspline) +(nsample*nsample-1)*(nsample*nsample-m*m);
#plt.figure(3);
plt.subplot(313);
xspec=( E-np.power( (E*E-np.power(3,nsample*nsample-1)*(nsample*nsample-m*m*m*m) ), 2 ) )/np.array( np.power(3,nsample*nsample-1)*(nsample-m*m) );
#alpha=numpy.log(xspc)/d;
plt.plot(x,xspec,label='real absorption rate X(a.u.)');
plt.legend();
plt.draw();
rc = messagebox.askyesno(js+'\niu', '是否保存?');
if rc:
#dls = win32ui.CreateFileDialog(bFileOpen=2,filter='CSV文本表格文件(*.csv)|*.csv|')
#dls.DoModal();
s=filedialog.asksaveasfilename(title=js+jl,filetypes=( ('CSV文本表格文件','*.csv'),),defaultextension='.csv');
    t = pandas.DataFrame([x, xspec])
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import pandas as pd
import glob
import numpy as np
from utils import *
import requests
import datetime as dt
import io
import sys
"""
The products generated from the civil registry (Registro Civil) data are:
31
"""
def normalizeRegCivDF(df):
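    # Normalize a raw Registro Civil dataframe: harmonize region/comuna names,
    # build an ISO-formatted 'Fecha' column from AÑO/MES/DIA, aggregate duplicate
    # rows, and attach the standard region/comuna codes before sorting.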
# rename columns
df.rename(columns={'REGION': 'Region', 'COMUNA': 'Comuna'}, inplace=True)
# title case => Title Case
df['Comuna'] = df['Comuna'].str.title()
regionNameRegex(df)
regionName(df)
# zero pad fechas
df['MES'] = df['MES'].astype(str).apply(lambda x: x.zfill(2))
df['DIA'] = df['DIA'].astype(str).apply(lambda x: x.zfill(2))
# standard fecha
df["Fecha"] = df["AÑO"].astype(str) + '-' + df["MES"].astype(str) + '-' + df["DIA"].astype(str)
df = df.drop(columns={'AÑO', 'MES', 'DIA'})
# handle duplicates
df['TOTAL'] = df.groupby(['Region', 'Comuna', 'Fecha'])['TOTAL'].transform('sum')
df.drop_duplicates(inplace=True)
df = normalizaNombreCodigoRegionYComuna(df)
df.sort_values(by=['Codigo region', 'Codigo comuna', 'Fecha'], na_position='first', inplace=True)
return df
def prod31_32(fte, prod):
data = []
outputPrefix = ''
if 'producto31' in prod:
outputPrefix = 'Nacimientos'
for file in glob.glob(fte + 'Nacimientos/*.xlsx'):
if '_DO' not in file:
df = pd.read_excel(file)
# rename columns
df.rename(columns={'REGION': 'Region', 'COMUNA': 'Comuna'}, inplace=True)
# title case => Title Case
df['Comuna'] = df['Comuna'].str.title()
regionNameRegex(df)
regionName(df)
# zero pad fechas
df['MES'] = df['MES'].astype(str).apply(lambda x: x.zfill(2))
df['DIA'] = df['DIA'].astype(str).apply(lambda x: x.zfill(2))
# standard fecha
df["Fecha"] = df["AÑO"].astype(str) + '-' + df["MES"].astype(str) + '-' + df["DIA"].astype(str)
df = df.drop(columns={'AÑO', 'MES', 'DIA'})
# handle duplicates
df['TOTAL'] = df.groupby(['Region', 'Comuna', 'Fecha'])['TOTAL'].transform('sum')
df.drop_duplicates(inplace=True)
if 'Nacimientos' in file:
df = df.rename(columns={'TOTAL': 'Nacimientos'})
data.append(df)
if 'producto32' in prod:
outputPrefix = 'Defunciones'
for file in glob.glob(fte + 'Defunciones/*.xlsx'):
if '_DO' not in file:
                df = pd.read_excel(file)
from sklearn.externals import joblib
import pandas as pd
import xgboost as xgb
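# Derive two screening flags per row before column selection: feature1 marks
# an abnormal temperature (>38.5 or <36) together with a heart rate above 90,
# and feature2 marks a platelet count below 100.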
def feature_engineering(raw_data):
for index,col in raw_data.iterrows():
if (col['Temp']>38.5 or col['Temp']<36) and (col['HR']>90) :
raw_data.at[index,'feature1']=1
else:
raw_data.at[index,'feature1']=0
if col['Platelets']<100:
raw_data.at[index,'feature2']=1
else:
raw_data.at[index,'feature2']=0
sel_columns=['HR', 'O2Sat', 'SBP', 'MAP', 'DBP', 'Resp', 'Age', 'Gender', 'Unit1',
'Unit2', 'HospAdmTime', 'ICULOS','feature1','feature2']
sel_data=raw_data[sel_columns]
data=xgb.DMatrix(sel_data,feature_names=sel_columns)
return data
def load_sepsis_model():
loaded_model = joblib.load('xgboost_v2.model')
return loaded_model
def get_sepsis_score(data,model):
org_feature = ['HR', 'O2Sat', 'Temp', 'SBP', 'MAP', 'DBP', 'Resp', 'EtCO2',
'BaseExcess', 'HCO3', 'FiO2', 'pH', 'PaCO2', 'SaO2', 'AST', 'BUN',
'Alkalinephos', 'Calcium', 'Chloride', 'Creatinine', 'Bilirubin_direct',
'Glucose', 'Lactate', 'Magnesium', 'Phosphate', 'Potassium',
'Bilirubin_total', 'TroponinI', 'Hct', 'Hgb', 'PTT', 'WBC',
'Fibrinogen', 'Platelets', 'Age', 'Gender', 'Unit1', 'Unit2',
'HospAdmTime', 'ICULOS']
    raw_cur_test = pd.DataFrame(data, columns=org_feature)
#!/usr/bin/env python
# coding: utf-8
# # US Beveridge Curve Data
#
# Construct monthly unemploment rate and vacancy rate series for the US from April 1929 through the most recently available date. The methodology is based on the approach described in Petrosky-Nadeau and Zhang (2013): https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2241695
#
# 1. This Notebook is compatible with Python 2 and 3.
#
# 2. **To use this notebook to download the entire dataset, you need the X-13ARIMA-SEATS binary**. If you don't have the binary, set variable `x_13` to `False`. Data that require seasonal adjustment will be loaded from the `txt` directory of the parent directory to this program.
#
# Binaries for Windows and Linux/Unix are available from https://www.census.gov/srd/www/x13as/. To compile X-13 for Mac OS X, see the instructions here: https://github.com/christophsax/seasonal/wiki/Compiling-X-13ARIMA-SEATS-from-Source-for-OS-X.
# In[1]:
import statsmodels as sm
import fredpy as fp
import matplotlib.pyplot as plt
plt.style.use('classic')
import numpy as np
import pandas as pd
import os,urllib
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
# You must change XPATH if you are running this script from anywhere other than the directory containing x13as.
XPATH = os.getcwd()
# Load fredpy api key
fp.api_key = fp.load_api_key('fred_api_key.txt')
# Whether x13 binary is available
x_13 = False
# ## Unemployment Rate
#
# Construct an unemployment series from April 1929 through the most recent date available by concatenating four U.S. unemployment rate series; all of which are available from FRED (https://fred.stlouisfed.org/). Specifically:
#
# 1. Seasonally adjusted unemployment rate for the United States from April 1929 through February 1940. FRED series ID: M0892AUSM156SNBR. NBER Indicator: m08292a.
# 2. Seasonally adjusted unemployment rate for the United States from March 1940 through December 1946. FRED series ID: M0892BUSM156SNBR. NBER Indicator: m08292b.
# 3. Seasonally adjusted unemployment rate for the United States from January 1947 through December 1947. FRED series ID: M0892CUSM156NNBR. NBER Indicator: m08292c. Note: The source data are not seasonally adjusted and contain observations through December 1966. Seasonally adjust the entire series through December 1966 using the U.S. Census Bureau's X-13-ARIMA seasonal adjustment program. Then discard values after December 1947. *Only downloaded if `x_13 == True.`*
# 4. Seasonally adjusted unemployment rate for the United States from January 1948 through the most recent date available. FRED series ID: UNRATE.
# In[2]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1929-04-01 to 1940-02-01;
# Seasonally adjusted
# Download from FRED and save as a Pandas series
unemp_1 = fp.series('M0892AUSM156SNBR')
unemp_1 = unemp_1.window(['04-01-1929','02-01-1940']).data
# In[3]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1940-03-01 to 1946-12-01;
# Seasonally adjusted
# Download from FRED and save as a Pandas series
unemp_2 = fp.series('M0892BUSM156SNBR')
unemp_2 = unemp_2.window(['03-01-1940','12-01-1946']).data
# In[4]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1947-01-01 to 1966-12-01;
# Raw series is *not* seasonally adjusted
if x_13:
# Download from FRED
unemp_3 = fp.series('M0892CUSM156NNBR')
unemp_3 = unemp_3.window(['01-01-1947','12-01-1966']).data
# Run x13_arima_analysis to obtain SA unemployment data.
x13results = sm.tsa.x13.x13_arima_analysis(endog = unemp_3,x12path=XPATH, outlier=False,print_stdout=True)
unemp_3 = pd.Series(x13results.seasadj.values,index=unemp_3.index)
unemp_3 = unemp_3[(unemp_3.index>=pd.to_datetime('01-01-1947')) & (unemp_3.index<=pd.to_datetime('12-01-1947'))]
# Export the series to txt
unemp_3.to_csv('../txt/unemployment_1947.txt',sep='\t')
else:
# Import data
unemp_3 = pd.read_csv('../txt/unemployment_1947.txt',sep='\t',index_col=0,parse_dates=True)['0']
# In[5]:
# US civilian unemployment rate from the BLS: 1948-01-01 to most recent;
# Seasonally adjusted
unemp_4 = fp.series('UNRATE')
unemp_4 = unemp_4.window(['01-01-1948','01-01-2200']).data
# In[6]:
# Concatenate the four unemployment rate series into a single series
unemployment_rate_series = unemp_1.append(unemp_2).sort_index()
unemployment_rate_series = unemployment_rate_series.append(unemp_3).sort_index()
unemployment_rate_series = unemployment_rate_series.append(unemp_4).sort_index()
# plot the series and save the figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(unemployment_rate_series,'-',lw=4,alpha = 0.65)
ax.set_ylabel('Percent')
ax.grid()
fig.tight_layout()
plt.savefig('../png/fig_data_unrate.png',bbox_inches='tight',dpi=120)
#
# ## Vacancies (Job openings)
#
# Construct a series of vacancies for the United States going back to April 1929 by scaling and concatenating three series:
# 1. Help-wanted advertising in newspapers index for United States from April 1929 to January 1960. FRED series ID: M0882AUSM349NNBR. NBER Indicator: m08082a. Note: The source data are not seasonally adjusted and contain observations through August 1960. Seasonally adjust the entire series through August 1960 using the United States Census Bureau's X-13-ARIMA seasonal adjustment program. Then discard values after January 1960. *Only downloaded if `x_13 == True.`*
# 2. Composite help-wanted index from January 1960 through January 2001 constructed using the method described in Barnichon (2010). The data are from Barnichon's website https://sites.google.com/site/regisbarnichon/data. Scale this series so that its value in January 1960 equals the value of the NBER's help-wanted index for the same date.
# 3. Job openings, total nonfarm for the United States from January 2001 to the most recent date available. FRED series ID: JTSJOL. Scale this series so that its value in January 2001 equals the value of the scaled help-wanted index from Barnichon for the same date.
# In[7]:
if x_13:
# Met life help-wanted index: 1919-01-01 to 1960-08-01;
# Not seasonally adjusted
vac_1 = fp.series('M0882AUSM349NNBR').data
# temp_series = pd.Series(vac_1.data,index=pd.to_datetime(vac_1.dates))
# Run x13_arima_analysis to obtain SA vacancy rate data.
x13results = sm.tsa.x13.x13_arima_analysis(endog = vac_1,x12path=XPATH, outlier=False,print_stdout=True)
vac_1 = pd.Series(x13results.seasadj.values,index=vac_1.index)
vac_1 = vac_1[(vac_1.index>=pd.to_datetime('04-01-1929')) ]
# Export the series to txt
vac_1.to_csv('../txt/vacancies_1929-1960.txt',sep='\t')
else:
vac_1 = pd.read_csv('../txt/vacancies_1929-1960.txt',sep='\t',index_col=0,parse_dates=True)['0']
# In[8]:
# Composite help-wanted index from Regis Barnichon's site: https://sites.google.com/site/regisbarnichon;
# Seasonally adjusted
# Import data from Regis Barnichon's site
dls = 'https://sites.google.com/site/regisbarnichon/cv/HWI_index.txt?attredirects=0'
try:
urllib.urlretrieve(dls, '../txt/HWI_index.txt')
except:
try:
urllib.request.urlretrieve(dls, '../txt/HWI_index.txt')
except:
print('HWI_index.txt is no longer available at given URL')
vac_2 = pd.read_csv('../txt/HWI_index.txt', delimiter='\t', skiprows=6)
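# The scaling and splicing of the three vacancy series described above lies
# beyond this excerpt. The lines below are a hedged sketch of the approach; the
# assumed layout of Barnichon's file (last column holds the index values, with a
# monthly sample starting January 1951) is an illustration, not the original notebook.
vac_2 = vac_2.iloc[:, -1]
vac_2.index = pd.date_range(start='1951-01-01', periods=len(vac_2), freq='MS')
# Scale Barnichon's composite index so it matches the NBER help-wanted index in Jan 1960
vac_2 = vac_2 * vac_1.loc['1960-01-01'] / vac_2.loc['1960-01-01']
# JOLTS job openings from Jan 2001 onward, scaled to match the previous series in Jan 2001
vac_3 = fp.series('JTSJOL').window(['01-01-2001', '01-01-2200']).data
vac_3 = vac_3 * vac_2.loc['2001-01-01'] / vac_3.loc['2001-01-01']
# Concatenate: NBER index through Dec 1959, Barnichon through Dec 2000, JOLTS afterward
vacancy_series = pd.concat([vac_1.loc[:'1959-12-01'],
                            vac_2.loc['1960-01-01':'2000-12-01'],
                            vac_3.loc['2001-01-01':]])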
# Make a plot of age vs J_z for Kepler-TGAS.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py
import os
from gyro import gyro_age
from actions import action
from granola import get_properties
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 13,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def calc_dispersion(age, jz, nbins):
hist_age, bins = np.histogram(age, nbins) # make histogram
dispersions, Ns, means = [], [], []
m = age < bins[0]
dispersions.append(RMS(jz[m]))
means.append(np.median(jz[m]))
Ns.append(len(age[m]))
for i in range(len(bins)-1):
m = (bins[i] < age) * (age < bins[i+1])
if len(age[m]):
dispersions.append(RMS(jz[m]))
Ns.append(len(age[m]))
means.append(np.median(jz[m]))
return bins, np.array(dispersions), np.array(Ns), np.array(means)
def RMS(x):
    # Note: despite its name, this returns the variance (std squared), not a
    # root-mean-square; a median-based RMS variant is left commented out below.
    # return (np.median(x**2))**.5
    return np.std(x)**2
def dispersion(ages, Jzs, minage, maxage):
"""
Dispersion in a single bin.
"""
m = (minage < ages) * (ages < maxage)
return RMS(Jzs[m]), len(ages[m])
def x_and_y(ages, Jzs):
xs = np.linspace(min(ages), max(ages), 1000)
ys = []
for x in xs:
y, N = dispersion(ages, Jzs, x-.5, x+.5)
ys.append(y)
return xs, ys
if __name__ == "__main__":
DATA_DIR = "/Users/ruthangus/granola/granola/data"
d = pd.read_csv("data/ages_and_actions.csv")
# d = pd.read_csv("ages_and_actions_vansaders.csv")
m = (d.age.values > 0) * (d.age.values < 14)
df = d.iloc[m]
ages, dispersions, Ns, means = calc_dispersion(df.age.values, df.Jz.values, 8)
d_err = dispersions / (2 * Ns - 2)**.5
    print(dispersions[:10], means[:10])
    # assert 0  # early exit used for debugging; left commented out so the plots below are produced
plt.clf()
# plt.errorbar(ages - .5*(ages[1] - ages[0]), np.array(dispersions),
# yerr=d_err, fmt="k.", capsize=0, ms=.1)
# plt.step(ages, dispersions, color="k")
plt.step(ages, means, color="k")
plt.errorbar(ages - .5*(ages[1] - ages[0]), np.array(means),
yerr=d_err, fmt="k.", capsize=0, ms=.1)
plt.xlabel("$\mathrm{Age~Gyr}$")
plt.ylabel("$\sigma J_z~(\mathrm{Kpc~kms}^{-1})$")
plt.savefig("linear_age_dispersion.pdf")
# plt.savefig("linear_age_dispersion_vansaders.pdf")
m = np.log(df.age.values) > - 1
lnages, dispersions, Ns, means = calc_dispersion(np.log10(df.age.values[m]),
df.Jz.values[m], 8)
d_err = dispersions / (2 * Ns - 2)**.5
plt.clf()
plt.errorbar(lnages - .5*(lnages[1] - lnages[0]), np.array(dispersions),
yerr=d_err, fmt="k.", capsize=0, ms=.1)
plt.step(lnages, dispersions, color="k")
# plt.errorbar(lnages - .5*(lnages[1] - lnages[0]), np.array(means),
# yerr=d_err, fmt="k.", capsize=0, ms=.1)
# plt.step(lnages, means, color="k")
plt.xlabel("$\log_{10}(\mathrm{Age,~Gyr})$")
plt.ylabel("$\sigma J_z~(\mathrm{Kpc~kms}^{-1})$")
# plt.xlim(-1, 2.6)
plt.subplots_adjust(left=.15, bottom=.15)
plt.savefig("log_age_dispersion.pdf")
# plt.savefig("log_age_dispersion_vansaders.pdf")
m = np.log(df.age.values) > - 1
x, y = x_and_y(np.log(df.age.values[m]), df.Jz.values[m])
plt.clf()
plt.plot(x, y)
plt.savefig("cont_age_dispersion.pdf")
# plt.savefig("cont_age_dispersion_vansaders.pdf")
"""
Plot vansaders model and barnes model on the same axes.
"""
DATA_DIR = "/Users/ruthangus/granola/granola/data"
d1 = pd.read_csv("data/ages_and_actions.csv")
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import unittest
import pandas as pd # type: ignore[import]
from pyspark import SparkConf
from pyspark.sql import Row
from pyspark.sql.utils import AnalysisException
from repair.costs import Levenshtein
from repair.misc import RepairMisc
from repair.model import FunctionalDepModel, RepairModel, PoorModel
from repair.detectors import ConstraintErrorDetector, NullErrorDetector, RegExErrorDetector
from repair.tests.requirements import have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from repair.tests.testutils import Eventually, ReusedSQLTestCase, load_testdata
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
class RepairModelTests(ReusedSQLTestCase):
@classmethod
def conf(cls):
return SparkConf() \
.set("spark.master", "local[*]") \
.set("spark.driver.memory", "4g") \
.set("spark.jars", os.getenv("REPAIR_API_LIB")) \
.set("spark.sql.cbo.enabled", "true") \
.set("spark.sql.statistics.histogram.enabled", "true") \
.set("spark.sql.statistics.histogram.numBins", "254")
@classmethod
def setUpClass(cls):
super(RepairModelTests, cls).setUpClass()
# Tunes # shuffle partitions
num_parallelism = cls.spark.sparkContext.defaultParallelism
cls.spark.sql(f"SET spark.sql.shuffle.partitions={num_parallelism}")
# Loads/Defines some test data
load_testdata(cls.spark, "adult.csv").createOrReplaceTempView("adult")
load_testdata(cls.spark, "adult_dirty.csv").createOrReplaceTempView("adult_dirty")
load_testdata(cls.spark, "adult_repair.csv").createOrReplaceTempView("adult_repair")
load_testdata(cls.spark, "adult_clean.csv").createOrReplaceTempView("adult_clean")
rows = [
(1, 0, 1.0, 1.0, 'a'),
(2, 1, 1.5, 1.5, 'b'),
(3, 0, 1.4, None, 'b'),
(4, 1, 1.3, 1.3, 'b'),
(5, 1, 1.2, 1.1, 'b'),
(6, 1, 1.1, 1.2, 'b'),
(7, 0, None, 1.4, 'b'),
(8, 1, 1.4, 1.0, 'b'),
(9, 0, 1.2, 1.1, 'b'),
(10, None, 1.3, 1.2, 'b'),
(11, 0, 1.0, 1.9, 'b'),
(12, 0, 1.9, 1.2, 'b'),
(13, 0, 1.2, 1.3, 'b'),
(14, 0, 1.8, 1.2, None),
(15, 0, 1.3, 1.1, 'b'),
(16, 1, 1.3, 1.0, 'b'),
(17, 0, 1.3, 1.0, 'b')
]
cls.spark.createDataFrame(rows, ["tid", "v1", "v2", "v3", "v4"]) \
.createOrReplaceTempView("mixed_input")
# Define some expected results
cls.expected_adult_result = cls.spark.table("adult_repair") \
.orderBy("tid", "attribute").collect()
cls.expected_adult_result_without_repaired = cls.spark.table("adult_repair") \
.selectExpr("tid", "attribute", "current_value") \
.orderBy("tid", "attribute").collect()
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
def test_invalid_params(self):
self.assertRaisesRegexp(
ValueError,
"`setInput` and `setRowId` should be called before repairing",
lambda: RepairModel().run())
self.assertRaisesRegexp(
ValueError,
"`setInput` and `setRowId` should be called before repairing",
lambda: RepairModel().setTableName("dummyTab").run())
self.assertRaisesRegexp(
ValueError,
"`setInput` and `setRowId` should be called before repairing",
lambda: RepairModel().setInput("dummyTab").run())
self.assertRaisesRegexp(
ValueError,
"Can not specify a database name when input is `DataFrame`",
lambda: RepairModel().setInput(self.spark.table("adult"))
.setDbName("default").run())
self.assertRaisesRegexp(
ValueError,
"`setRepairDelta` should be called before maximal likelihood repairing",
lambda: RepairModel().setTableName("dummyTab").setRowId("dummyId")
.setMaximalLikelihoodRepairEnabled(True).run())
self.assertRaisesRegexp(
ValueError,
"`setRepairDelta` should be called before maximal likelihood repairing",
lambda: RepairModel().setInput("dummyTab").setRowId("dummyId")
.setMaximalLikelihoodRepairEnabled(True).run())
self.assertRaisesRegexp(
ValueError,
"`attrs` has at least one attribute",
lambda: RepairModel().setTargets([]))
self.assertRaisesRegexp(
ValueError,
"threshold must be bigger than 1",
lambda: RepairModel().setDiscreteThreshold(1))
def test_exclusive_params(self):
def _assert_exclusive_params(func):
self.assertRaisesRegexp(ValueError, "cannot be set to True simultaneously", func)
test_model = RepairModel()
api = test_model.setTableName("dummyTab").setRowId("dummyId")
_assert_exclusive_params(
lambda: api.run(detect_errors_only=True, compute_repair_candidate_prob=True))
_assert_exclusive_params(
lambda: api.run(detect_errors_only=True, repair_data=True))
_assert_exclusive_params(
lambda: api.run(compute_repair_candidate_prob=True, repair_data=True))
_assert_exclusive_params(
lambda: api.run(compute_repair_candidate_prob=True, compute_repair_prob=True))
_assert_exclusive_params(
lambda: api.run(compute_repair_candidate_prob=True, compute_repair_score=True))
def test_argtype_check(self):
self.assertRaises(
TypeError,
"`db_name` should be provided as str, got int",
lambda: RepairModel().setDbName(1))
self.assertRaises(
TypeError,
"`table_name` should be provided as str, got int",
lambda: RepairModel().setTableName(1))
self.assertRaises(
TypeError,
"`thres` should be provided as int, got str",
lambda: RepairModel().setDiscreteThreshold("a"))
self.assertRaises(
TypeError,
"`thres` should be provided as float, got int",
lambda: RepairModel().setMinCorrThreshold(1))
self.assertRaises(
TypeError,
"`beta` should be provided as float, got int",
lambda: RepairModel().setDomainThresholds(1.0, 1))
self.assertRaises(
TypeError,
"`input` should be provided as str/DataFrame, got int",
lambda: RepairModel().setInput(1))
self.assertRaises(
TypeError,
"`attrs` should be provided as list[str], got int",
lambda: RepairModel().setTargets(1))
self.assertRaises(
TypeError,
"`attrs` should be provided as list[str], got int in elements",
lambda: RepairModel().setTargets(["a", 1]))
self.assertRaises(
TypeError,
"`detectors` should be provided as list[ErrorDetector], got int in elements",
lambda: RepairModel().setErrorDetectors([1]))
self.assertRaises(
TypeError,
"`cf` should be provided as UpdateCostFunction, got int",
lambda: RepairModel().setUpdateCostFunction([1]))
def test_invalid_running_modes(self):
test_model = RepairModel() \
.setTableName("mixed_input") \
.setRowId("tid")
self.assertRaisesRegexp(
ValueError,
"Cannot compute repair scores when the maximal likelihood repair mode disabled",
lambda: test_model.run(compute_repair_score=True))
self.assertRaisesRegexp(
ValueError,
"Cannot enable the maximal likelihood repair mode when continous attributes found",
lambda: test_model.setMaximalLikelihoodRepairEnabled(True).setRepairDelta(1).run())
# TODO: We fix a seed for building a repair model, but inferred values fluctuate run-by-run.
# So, to avoid it, we set 1 to `hp.max_evals` for now.
def _build_model(self):
return RepairModel().option("hp.max_evals", "1")
def test_multiple_run(self):
# Checks if auto-generated views are dropped finally
current_view_nums = self.spark.sql("SHOW VIEWS").count()
def _test_basic():
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid")
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(),
self.expected_adult_result)
_test_basic() # first run
_test_basic() # second run
self.assertEqual(
self.spark.sql("SHOW VIEWS").count(),
current_view_nums)
def test_parallel_stat_training(self):
df = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setParallelStatTrainingEnabled(True) \
.run()
self.assertEqual(
df.orderBy("tid", "attribute").collect(),
self.expected_adult_result)
def test_table_input(self):
with self.table("adult_table"):
# Tests for `setDbName`
self.spark.table("adult").write.mode("overwrite").saveAsTable("adult_table")
test_model = self._build_model() \
.setDbName("default") \
.setTableName("adult_table") \
.setRowId("tid")
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(),
self.expected_adult_result)
def test_input_overwrite(self):
with self.table("adult_table"):
# Tests for input overwrite case ("default.adult_table" -> "adult")
self.spark.table("adult").write.mode("overwrite").saveAsTable("adult_table")
test_model = self._build_model() \
.setDbName("default") \
.setTableName("adult_table") \
.setInput(self.spark.table("adult")) \
.setRowId("tid")
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(),
self.expected_adult_result)
def test_setInput(self):
def _test_setInput(input):
test_model = self._build_model().setInput(input).setRowId("tid")
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(),
self.expected_adult_result)
_test_setInput("adult")
_test_setInput(self.spark.table("adult"))
def test_setTargets(self):
error_cells_df = expected_result = self.spark.table("adult_repair") \
.selectExpr("tid", "attribute") \
.orderBy("tid", "attribute")
def _test_setTargets(targets):
actual_result = self._build_model() \
.setInput("adult") \
.setRowId("tid") \
.setTargets(targets) \
.run() \
.selectExpr("tid", "attribute") \
.orderBy("tid", "attribute")
expected_result = error_cells_df \
.where("attribute IN ({})".format(",".join(map(lambda x: f"'{x}'", targets)))) \
.collect()
self.assertEqual(
actual_result.collect(),
expected_result)
_test_setTargets(["Sex"])
_test_setTargets(["Sex", "Income"])
_test_setTargets(["Age", "Sex"])
_test_setTargets(["Non-Existent", "Age"])
_test_setTargets(["Non-Existent"])
def test_setErrorCells(self):
def _test_setErrorCells(error_cells):
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setErrorCells(error_cells)
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(),
self.expected_adult_result)
_test_setErrorCells("adult_dirty")
_test_setErrorCells(self.spark.table("adult_dirty"))
def test_setModelLoggingEnabled(self):
current_view_nums = self.spark.sql("SHOW VIEWS").count()
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setModelLoggingEnabled(True)
def _assert_log_view(m, expected_nrepairs, expected_nlogs):
self.assertEqual(len(m.run().collect()), expected_nrepairs)
self.assertEqual(
self.spark.sql("SHOW VIEWS").count(),
current_view_nums + 1)
views = self.spark.sql("SHOW VIEWS").where("viewName LIKE 'repair_model_%'").collect()
self.assertEqual(len(views), 1)
view_name = views[0].viewName
logs_df = self.spark.table(view_name)
self.assertEqual(logs_df.count(), expected_nlogs)
self.assertEqual(
logs_df.schema.simpleString(),
"struct<attributes:string,type:string,score:double,elapsed:double,"
"training_nrow:bigint,nclass:bigint,class_nrow_stdv:double>")
self.spark.sql(f"DROP VIEW {view_name}")
# Serial training case
_assert_log_view(test_model, 7, 3)
# Parallel training case
test_model = test_model.setTargets(["Age", "Sex"]).setParallelStatTrainingEnabled(True)
_assert_log_view(test_model, 5, 2)
def _assert_adult_without_repaired(self, test_model):
def _test(df):
self.assertEqual(
df.selectExpr("tid", "attribute", "current_value").orderBy("tid", "attribute").collect(),
self.expected_adult_result_without_repaired)
_test(test_model.setParallelStatTrainingEnabled(False).run())
_test(test_model.setParallelStatTrainingEnabled(True).run())
def test_setMaxTrainingRowNum(self):
row_num = int(self.spark.table("adult").count() / 2)
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setMaxTrainingRowNum(row_num)
self._assert_adult_without_repaired(test_model)
def test_setMaxTrainingColumnNum(self):
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setMaxTrainingColumnNum(2)
self._assert_adult_without_repaired(test_model)
def test_error_cells_having_no_existent_attribute(self):
error_cells = [
Row(tid=1, attribute="NoExistent"),
Row(tid=5, attribute="Income"),
Row(tid=16, attribute="Income")
]
error_cells_df = self.spark.createDataFrame(
error_cells, schema="tid STRING, attribute STRING")
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setErrorCells(error_cells_df)
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(), [
Row(tid=5, attribute="Income", current_value=None, repaired="MoreThan50K"),
Row(tid=16, attribute="Income", current_value=None, repaired="MoreThan50K")])
def test_detect_errors_only(self):
# Tests for `NullErrorDetector`
null_errors = self._build_model() \
.setInput("adult") \
.setRowId("tid") \
.run(detect_errors_only=True)
self.assertEqual(
null_errors.orderBy("tid", "attribute").collect(),
self.expected_adult_result_without_repaired)
# Tests for `RegExErrorDetector`
error_detectors = [
NullErrorDetector(),
RegExErrorDetector("Exec-managerial"),
RegExErrorDetector("India")
]
regex_errors = self._build_model() \
.setInput("adult") \
.setRowId("tid") \
.setErrorDetectors(error_detectors) \
.run(detect_errors_only=True)
self.assertEqual(
regex_errors.subtract(null_errors).orderBy("tid", "attribute").collect(), [
Row(tid=1, attribute="Occupation", current_value="Exec-managerial"),
Row(tid=7, attribute="Country", current_value="India"),
Row(tid=12, attribute="Occupation", current_value="Exec-managerial"),
Row(tid=14, attribute="Occupation", current_value="Exec-managerial"),
Row(tid=16, attribute="Occupation", current_value="Exec-managerial")])
# Tests for `ConstraintErrorDetector`
constraint_path = "{}/adult_constraints.txt".format(os.getenv("REPAIR_TESTDATA"))
error_detectors = [
NullErrorDetector(),
ConstraintErrorDetector(constraint_path)
]
constraint_errors = self._build_model() \
.setInput("adult") \
.setRowId("tid") \
.setErrorDetectors(error_detectors) \
.run(detect_errors_only=True)
self.assertEqual(
constraint_errors.subtract(null_errors).orderBy("tid", "attribute").collect(), [
Row(tid=4, attribute="Relationship", current_value="Husband"),
Row(tid=4, attribute="Sex", current_value="Female"),
Row(tid=11, attribute="Relationship", current_value="Husband"),
Row(tid=11, attribute="Sex", current_value="Female")])
def test_repair_data(self):
test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid")
expected_result = self.spark.table("adult_clean") \
.orderBy("tid").collect()
self.assertEqual(
test_model.run(repair_data=True).orderBy("tid").collect(),
expected_result)
def test_unsupported_types(self):
with self.tempView("inputView"):
self.spark.range(1).selectExpr("id tid", "1 AS x", "CAST('2021-08-01' AS DATE) y") \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid")
self.assertRaises(AnalysisException, lambda: test_model.run())
def test_table_has_no_enough_columns(self):
with self.tempView("inputView"):
rows = [
(1, None),
(2, "test-1"),
(3, "test-1")
]
self.spark.createDataFrame(rows, ["tid", "x"]) \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid")
self.assertRaises(AnalysisException, lambda: test_model.run())
def test_rowid_uniqueness(self):
with self.tempView("inputView"):
rows = [
(1, 1, None),
(1, 1, "test-1"),
(1, 2, "test-1")
]
self.spark.createDataFrame(rows, ["tid", "x", "y"]) \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid")
self.assertRaises(AnalysisException, lambda: test_model.run())
def test_no_valid_discrete_feature_exists_1(self):
with self.tempView("inputView"):
rows = [
(1, "1", None),
(2, "1", None),
(3, "1", "test-1"),
(4, "1", "test-1"),
(5, "1", "test-1"),
(6, "1", None)
]
self.spark.createDataFrame(rows, ["tid", "x", "y"]) \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid")
self.assertRaisesRegexp(
ValueError,
"At least one valid discretizable feature is needed to repair error cells",
lambda: test_model.run())
def test_no_valid_discrete_feature_exists_2(self):
with self.tempView("inputView"):
rows = [
(1, "1", None),
(2, "2", "test-2"),
(3, "3", "test-3"),
(4, "4", "test-4"),
(5, "5", "test-5"),
(6, "6", "test-6")
]
self.spark.createDataFrame(rows, ["tid", "x", "y"]) \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid") \
.setDiscreteThreshold(3)
self.assertRaisesRegexp(
ValueError,
"At least one valid discretizable feature is needed to repair error cells",
lambda: test_model.run(detect_errors_only=False))
self.assertEqual(
test_model.run(detect_errors_only=True).collect(), [
Row(tid=1, attribute="y", current_value=None)])
def test_no_repairable_cell_exists(self):
with self.tempView("inputView"):
rows = [
(1, "1", None),
(2, "2", None),
(3, "1", "test-1"),
(4, "1", "test-1"),
(5, "1", "test-1"),
(6, "1", None)
]
self.spark.createDataFrame(rows, ["tid", "x", "y"]) \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid")
self.assertRaisesRegexp(
ValueError,
"To repair noisy cells, they should be discretizable",
lambda: test_model.run(detect_errors_only=False))
self.assertEqual(
test_model.run(detect_errors_only=True).orderBy("tid", "attribute").collect(), [
Row(tid=1, attribute="y", current_value=None),
Row(tid=2, attribute="y", current_value=None),
Row(tid=6, attribute="y", current_value=None)])
def test_regressor_model(self):
with self.tempView("inputView"):
rows = [
(1, 1.0, 1.0, 1.0),
(2, 1.5, 1.5, 1.5),
(3, 1.4, 1.4, None),
(4, 1.3, 1.3, 1.3),
(5, 1.1, 1.1, 1.1),
(6, 1.2, 1.2, None)
]
self.spark.createDataFrame(rows, ["tid", "x", "y", "z"]) \
.createOrReplaceTempView("inputView")
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid")
df = test_model.run().orderBy("tid", "attribute")
self.assertEqual(
df.selectExpr("tid", "attribute", "current_value").collect(), [
Row(tid=3, attribute="z", current_value=None),
Row(tid=6, attribute="z", current_value=None)])
rows = df.selectExpr("repaired").collect()
self.assertTrue(rows[0].repaired is not None)
self.assertTrue(rows[1].repaired is not None)
def test_rule_based_model(self):
with self.tempView("inputView", "errorCells"):
rows = [
(1, "1", "test-1"),
(2, "2", "test-2"),
(3, "1", None),
(4, "2", "test-2"),
(5, "2", None),
(6, "3", None)
]
self.spark.createDataFrame(rows, ["tid", "x", "y"]) \
.createOrReplaceTempView("inputView")
self.spark.createDataFrame([(3, "y"), (5, "y"), (6, "y")], ["tid", "attribute"]) \
.createOrReplaceTempView("errorCells")
with tempfile.NamedTemporaryFile("w+t") as f:
# Creates a file for constraints
f.write("t1&t2&EQ(t1.x,t2.x)&IQ(t1.y,t2.y)")
f.flush()
error_detectors = [
NullErrorDetector(),
ConstraintErrorDetector(f.name)
]
test_model = self._build_model() \
.setTableName("inputView") \
.setRowId("tid") \
.setErrorCells("errorCells") \
.setErrorDetectors(error_detectors) \
.setRuleBasedModelEnabled(True)
self.assertEqual(
test_model.run().orderBy("tid", "attribute").collect(), [
Row(tid=3, attribute="y", current_value=None, repaired="test-1"),
Row(tid=5, attribute="y", current_value=None, repaired="test-2"),
Row(tid=6, attribute="y", current_value=None, repaired=None)])
def test_repair_updates(self):
expected_result = self.spark.table("adult_clean") \
.orderBy("tid").collect()
repair_updates_df = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.run()
with self.tempView("repair_updates_view"):
repair_updates_df.createOrReplaceTempView("repair_updates_view")
df = RepairMisc() \
.option("repair_updates", "repair_updates_view") \
.option("table_name", "adult") \
.option("row_id", "tid") \
.repair()
self.assertEqual(
df.orderBy("tid").collect(),
expected_result)
def _check_adult_repair_prob_and_score(self, df, expected_schema):
self.assertEqual(
df.schema.simpleString(),
expected_schema)
self.assertEqual(
df.selectExpr("tid", "attribute", "current_value").orderBy("tid", "attribute").collect(),
self.expected_adult_result_without_repaired)
def test_compute_repair_candidate_prob(self):
repaired_df = test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.run(compute_repair_candidate_prob=True)
self._check_adult_repair_prob_and_score(
repaired_df,
"struct<tid:int,attribute:string,current_value:string,"
"pmf:array<struct<c:string,p:double>>>")
def test_compute_repair_prob(self):
repaired_df = test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.run(compute_repair_prob=True)
self._check_adult_repair_prob_and_score(
repaired_df,
"struct<tid:int,attribute:string,current_value:string,"
"repaired:string,prob:double>")
def test_compute_repair_score(self):
repaired_df = test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setMaximalLikelihoodRepairEnabled(True) \
.setRepairDelta(1) \
.run(compute_repair_score=True)
self._check_adult_repair_prob_and_score(
repaired_df,
"struct<tid:int,attribute:string,current_value:string,"
"repaired:string,score:double>")
def test_maximal_likelihood_repair(self):
repaired_df = test_model = self._build_model() \
.setTableName("adult") \
.setRowId("tid") \
.setMaximalLikelihoodRepairEnabled(True) \
.setRepairDelta(3) \
.setUpdateCostFunction(Levenshtein()) \
.run()
self.assertEqual(
repaired_df.orderBy("tid", "attribute").collect(), [
Row(tid=3, attribute="Sex", current_value=None, repaired="Male"),
Row(tid=7, attribute="Sex", current_value=None, repaired="Male"),
Row(tid=12, attribute="Sex", current_value=None, repaired="Male")])
def test_compute_repair_prob_for_continouos_values(self):
test_model = test_model = self._build_model() \
.setTableName("mixed_input") \
.setRowId("tid")
def _test(df, expected_schema):
self.assertEqual(
df.schema.simpleString(),
expected_schema)
self.assertEqual(
df.selectExpr("tid", "attribute", "current_value").orderBy("tid", "attribute").collect(), [
Row(tid=3, attribute="v3", current_value=None),
Row(tid=7, attribute="v2", current_value=None),
Row(tid=10, attribute="v1", current_value=None),
Row(tid=14, attribute="v4", current_value=None)])
_test(test_model.run(compute_repair_candidate_prob=True),
"struct<tid:bigint,attribute:string,current_value:string,"
"pmf:array<struct<c:string,p:double>>>")
_test(test_model.run(compute_repair_prob=True),
"struct<tid:bigint,attribute:string,current_value:string,"
"repaired:string,prob:double>")
def test_integer_input(self):
with self.tempView("int_input"):
rows = [
(1, 1, 1, 3, 0),
(2, 2, None, 2, 1),
(3, 3, 2, 2, 0),
(4, 2, 2, 3, 1),
(5, None, 1, 3, 0),
(6, 2, 2, 3, 0),
(7, 3, 1, None, 0),
(8, 2, 1, 2, 1),
(9, 1, 1, 2, None)
]
self.spark.createDataFrame(rows, "tid: int, v1: byte, v2: short, v3: int, v4: long") \
.createOrReplaceTempView("int_input")
df = test_model = self._build_model() \
.setTableName("int_input") \
.setRowId("tid") \
.run()
self.assertEqual(
df.orderBy("tid", "attribute").collect(), [
Row(tid=2, attribute="v2", current_value=None, repaired="2"),
Row(tid=5, attribute="v1", current_value=None, repaired="2"),
Row(tid=7, attribute="v3", current_value=None, repaired="2"),
Row(tid=9, attribute="v4", current_value=None, repaired="1")])
    def test_FunctionalDepModel(self):
model = FunctionalDepModel("x", {1: "test-1", 2: "test-2", 3: "test-3"})
pdf = pd.DataFrame([[3], [1], [2], [4]], columns=["x"])
self.assertEqual(model.classes_.tolist(), ["test-1", "test-2", "test-3"])
self.assertEqual(model.predict(pdf), ["test-3", "test-1", "test-2", None])
pmf = model.predict_proba(pdf)
self.assertEqual(len(pmf), 4)
self.assertEqual(pmf[0].tolist(), [0.0, 0.0, 1.0])
self.assertEqual(pmf[1].tolist(), [1.0, 0.0, 0.0])
self.assertEqual(pmf[2].tolist(), [0.0, 1.0, 0.0])
self.assertEqual(pmf[3].tolist(), [0.0, 0.0, 0.0])
def test_PoorModel(self):
model = PoorModel(None)
pdf = pd.DataFrame([[3], [1], [2], [4]], columns=["x"])
self.assertEqual(model.classes_.tolist(), [None])
self.assertEqual(model.predict(pdf), [None, None, None, None])
pmf = model.predict_proba(pdf)
self.assertEqual(len(pmf), 4)
self.assertEqual(pmf[0].tolist(), [1.0])
self.assertEqual(pmf[1].tolist(), [1.0])
self.assertEqual(pmf[2].tolist(), [1.0])
self.assertEqual(pmf[3].tolist(), [1.0])
model = PoorModel("test")
        pdf = pd.DataFrame([[3], [1], [2], [4]], columns=["x"])
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
    # use the same data as test_groupby_sort_categorical, whose categories
    # correspond to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("<KEY>")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
# GH 13420
df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
cat = Categorical.from_codes(code, categories=list("abc"))
result = df.groupby(cat, axis=1).mean()
expected = df.T.groupby(cat, axis=0).mean().T
tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(DataFrame.sum, skipna=True)
.reset_index()
)
tm.assert_frame_equal(result, expected)
def test_get_nonexistent_category():
# Accessing a Category that is not in the dataframe
df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
with pytest.raises(KeyError, match="'vau'"):
df.groupby("var").apply(
lambda rows: DataFrame(
{"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
)
)
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABCD")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
"value": [0.1] * 4,
}
)
args = {"nth": [0]}.get(reduction_func, [])
expected_length = 4 if observed else 16
series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
assert len(result) == expected_length
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
reduction_func, request
):
# GH 17605
# Tests whether the unobserved categories in the result contain 0 or NaN
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
"value": [0.1] * 4,
}
)
unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
args = {"nth": [0]}.get(reduction_func, [])
series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
for idx in unobserved:
val = result.loc[idx]
assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)
# If we expect unobserved values to be zero, we also expect the dtype to be int.
# Except for .sum(). If the observed categories sum to dtype=float (i.e. their
# sums have decimals), then the zeros for the missing categories should also be
# floats.
if zero_or_nan == 0 and reduction_func != "sum":
assert np.issubdtype(result.dtype, np.integer)
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# does not return the categories that are not in df when observed=True
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
for cat in unobserved_cats:
assert cat not in res.index
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
reduction_func, observed, request
):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# returns the categories that are not in df when observed=False/None
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
expected = _results_for_groupbys_with_missing_categories[reduction_func]
if expected is np.nan:
assert res.loc[unobserved_cats].isnull().all().all()
else:
assert (res.loc[unobserved_cats] == expected).all().all()
def test_series_groupby_categorical_aggregation_getitem():
# GH 8870
d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=True, sort=True)
result = groups["foo"].agg("mean")
expected = groups.agg("mean")["foo"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, expected_values",
[(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
# 31256
df = DataFrame(
{
"id": [0, 1, 2, 3, 4],
"groups": [0, 1, 1, 2, 2],
"value": Categorical([0, 0, 0, 0, 1]),
}
).set_index("id")
result = df.groupby("groups").agg(func)
expected = DataFrame(
{"value": expected_values}, index=Index([0, 1, 2], name="groups")
)
tm.assert_frame_equal(result, expected)
def test_groupby_agg_non_numeric():
df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
expected = | DataFrame({"A": [2, 1]}, index=[1, 2]) | pandas.DataFrame |
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import imageio
import os
import shutil
def generate(slope, intercept):
data_points, randomness = 200, 30
x_data = np.linspace(0, 400, data_points)
rand = np.random.randint(-randomness, randomness, size=data_points)
# slope, intercept = 0.789, 60
y_data = slope * x_data + intercept
y_data = y_data + rand
dataset = np.column_stack([x_data, y_data])
df = pd.DataFrame(dataset, columns=["x", "y"])
df.to_csv("dataset.csv")
def gif(df, x, y, m, c):
os.makedirs("gif")
filenames = []
prog = 0.0
my_bar = st.progress(prog)
for i in range(0, len(m), 20):
plt.scatter(df[x], df[y], c="blue")
plt.plot(df[x], (m[i]*df[x]+c[i]))
plt.title("Regression Plot")
plt.xlabel("x")
plt.ylabel("y")
filename = f'./gif/{i}.png'
filenames.append(filename)
plt.savefig(filename)
plt.close()
my_bar.progress(prog)
prog = prog + (1.0/len(m))*20
with imageio.get_writer('plot.gif', mode='I') as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
shutil.rmtree("gif")
def regression(df, epochs_limit, learning_rate, x, y, intercept_priority):
m = 0
c = 0
m_ar = []
c_ar = []
losses = []
data_len = len(df)
loss = 100000000
loss_dif = epochs_limit + 1
epoch_count = 0
log = st.empty()
    # `epochs_limit` acts as the minimum per-epoch loss improvement: training
    # stops once the loss stops improving by more than this threshold.
    while loss_dif > epochs_limit:
sum_m = 0
sum_c = 0
dm = 0
dc = 0
        prev_loss = loss
        loss = 0  # reset the accumulator so each epoch's loss is computed fresh
for d in range(data_len):
sum_m = sum_m + (df[x][d] * (df[y][d] - (m*df[x][d]+c)))
sum_c = sum_c + (df[y][d] - (m*df[x][d]+c))
loss = loss + ((df[y][d] - (m*df[x][d]+c))
* (df[y][d] - (m*df[x][d]+c)))
dm = (-2/data_len)*sum_m
dc = (-2/data_len)*sum_c * intercept_priority
loss = loss/data_len
m = m-learning_rate*dm
c = c-learning_rate*dc
losses.append(loss)
m_ar.append(m)
c_ar.append(c)
loss_dif = prev_loss - loss
log.empty()
log.metric(label="Current Loss", value=loss, delta=-loss_dif)
epoch_count = epoch_count+1
st.write("Developing GIF, hold on... ")
gif(df, x, y, m_ar, c_ar)
return losses, m, c, epoch_count
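# The loop above is plain batch gradient descent on the mean-squared-error loss
#   L(m, c) = (1/n) * sum_i (y_i - (m*x_i + c))^2
# with gradients
#   dL/dm = (-2/n) * sum_i x_i * (y_i - (m*x_i + c))
#   dL/dc = (-2/n) * sum_i       (y_i - (m*x_i + c))   (scaled by intercept_priority here)
# and updates m <- m - learning_rate * dL/dm, c <- c - learning_rate * dL/dc.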
def run(epochs_limit, intercept_priority):
x = "x"
y = "y"
st.header("Dataset and Scatter Plot ")
df = pd.read_csv("dataset.csv", usecols=[x, y])
col1, col2 = st.columns([1, 2])
col1.dataframe(df)
fig1 = plt.figure(1)
plt.scatter(df[x], df[y])
plt.title("Scatter Plot")
col2.pyplot(fig1)
st.write("***\n")
st.header("Running Regression ")
losses = []
losses, m, c, epochs = regression(
df, epochs_limit, 0.0000001, x, y, intercept_priority)
st.write("***")
st.header("Predictions ")
d1, d2 = st.columns(2)
d1.metric(label="Slope ", value=m)
    d2.metric(label="Intercept ", value=c)
st.write("***")
col1, col2 = st.columns(2)
fig2 = plt.figure(2)
plt.scatter(df[x], df[y])
plt.plot(df[x], (m*df[x] + c))
plt.title("Predicted Regression Line")
plt.xlabel("x")
plt.ylabel("y")
col1.pyplot(fig2)
fig3 = plt.figure(3)
plt.plot(list(range(epochs)), losses)
plt.title("Learning Curve")
plt.xlabel("Epochs")
plt.ylabel("Loss")
col2.pyplot(fig3)
st.header("Regression Line Animation")
st.image("plot.gif")
def prep_dataset(dataset):
df = pd.read_csv(dataset)
x = str(st.sidebar.selectbox("Select dependent data", df.columns))
y = str(st.sidebar.selectbox("Select independent data", df.columns))
if st.sidebar.button("Confirm"):
x_data = df[x]
y_data = df[y]
dataset = np.column_stack([x_data, y_data])
df = | pd.DataFrame(dataset, columns=["x", "y"]) | pandas.DataFrame |
import datetime
import json
import numpy as np
import pandas as pd
import requests
import xarray as xr
from utils import divide_chunks, get_indices_not_done, \
get_site_codes, append_to_csv_column_wise, load_s3_zarr_store,\
convert_df_to_dataset
def get_all_streamflow_data(output_file, sites_file, huc2=None,
num_sites_per_chunk=5, start_date="1970-01-01",
end_date='2019-01-01', time_scale='H',
output_format='zarr', num_site_chunks_write=6,
s3=False):
"""
gets all streamflow data for a date range for a given huc2. Calls are
chunked by station
:param output_file: [str] path to the csv file or zarr store where the data
will be stored
:param sites_file: [str] path to file that contains the nwis site
information
:param huc2: [str] zero-padded huc 2 (e.g., "02")
:param num_sites_per_chunk: [int] the number of sites that will be pulled
at in each web service call
:param start_date: [str] the start date of when you want the data for
(e.g., "1980-01-01")
:param end_date: [str] the end date of when you want the data for
(e.g., "1990-01-01")
:param time_scale: [str] Pandas like time string for the time scale at which
the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
:param output_format: [str] the format of the output file. 'csv' or 'zarr'
    :param num_site_chunks_write: [int] the number of site chunks to accumulate
    before writing them out to the output file
    :param s3: [bool] whether the output file / zarr store is on S3
:return: None
"""
product = get_product_from_time_scale(time_scale)
site_codes = get_site_codes(sites_file, huc2)
not_done_sites = get_indices_not_done(output_file, site_codes, 'site_code',
output_format, is_column=False,
s3=s3)
site_codes_chunked = divide_chunks(not_done_sites, num_sites_per_chunk)
# loop through site_code_chunks
chunk_dfs = []
i = 0
for site_chunk in site_codes_chunked:
last_chunk = False
if site_chunk[-1] == not_done_sites[-1]:
last_chunk = True
streamflow_df_sites = None
# catch if there is a problem on the server retrieving the data
try:
streamflow_df_sites = get_streamflow_data(site_chunk,
start_date,
end_date,
product,
time_scale)
except json.decoder.JSONDecodeError:
continue
if streamflow_df_sites is not None:
chunk_dfs.append(streamflow_df_sites)
# add the number of stations for which we got data
i += streamflow_df_sites.shape[1]
if not i % (num_site_chunks_write * num_sites_per_chunk) or \
last_chunk:
print('writing out', flush=True)
write_out_chunks(chunk_dfs, output_file, output_format)
chunk_dfs = []
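# Minimal usage sketch -- the file names and HUC code below are placeholders, not
# values defined by this module:
# get_all_streamflow_data(output_file="streamflow_huc02.zarr",
#                         sites_file="nwis_site_info.csv",
#                         huc2="02",
#                         time_scale="H",
#                         output_format="zarr")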
def write_out_chunks(chunks_dfs, out_file, out_format):
all_chunks_df = pd.concat(chunks_dfs, axis=1)
# write the data out to the output file
if out_format == 'zarr':
zarr_store = load_s3_zarr_store(out_file)
append_to_zarr(all_chunks_df, zarr_store)
elif out_format == 'csv':
append_to_csv_column_wise(all_chunks_df, out_file)
else:
raise ValueError("output_format should be 'csv' or 'zarr'")
def get_product_from_time_scale(time_scale):
"""
get the the USGS nwis product that is appropriate for the time scale
:param time_scale: str - Pandas like time string for the time scale at which
the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
:return:
"""
iv_scales = ['15T', 'T', 'H']
dv_scale = ['D']
if time_scale in iv_scales:
return 'iv'
elif time_scale in dv_scale:
return 'dv'
else:
raise ValueError("time scale must be '15T', 'T', 'H', or 'D'")
def append_to_zarr(streamflow_df, output_zarr):
# chunks
time_chunk = len(streamflow_df.index)
site_code_chunk = len(streamflow_df.columns)
ds = convert_df_to_dataset(streamflow_df, 'site_code', 'datetime',
'streamflow', {'datetime': time_chunk,
'site_code': site_code_chunk})
ds.to_zarr(output_zarr, append_dim='site_code', mode='a')
def get_streamflow_data(sites, start_date, end_date, product, time_scale):
response = call_nwis_service(sites, start_date, end_date, product)
data = json.loads(response.text)
streamflow_df = nwis_json_to_df(data, start_date, end_date,
time_scale)
return streamflow_df
def call_nwis_service(sites, start_date, end_date, product):
"""
gets the data for a list of sites from a start date to an end date
"""
base_url = "http://waterservices.usgs.gov/nwis/{}/?format=json&sites={}&" \
"startDT={}&endDT={}¶meterCd=00060&siteStatus=all"
url = base_url.format(product, ",".join(sites), start_date, end_date)
request_start_time = datetime.datetime.now()
print(f"starting request for sites {sites} at {request_start_time}, "
f"for period {start_date} to {end_date}", flush=True)
r = None
while not r:
try:
r = requests.get(url)
        except requests.exceptions.RequestException:
            print('there was some problem. trying again', flush=True)
request_end_time = datetime.datetime.now()
request_time = request_end_time - request_start_time
print(f"took {request_time} to get data for huc {sites}", flush=True)
return r
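# For reference, call_nwis_service(["01646500"], "2019-01-01", "2019-01-02", "iv")
# fills the base_url template above into a request of the form (the site number is
# just an example gauge):
# http://waterservices.usgs.gov/nwis/iv/?format=json&sites=01646500&startDT=2019-01-01
#     &endDT=2019-01-02&parameterCd=00060&siteStatus=all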
def format_dates(datetime_txt):
# convert datetime
datetime_ser = pd.to_datetime(datetime_txt, utc=True)
# remove the time zone info since we are now in utc
datetime_ser = datetime_ser.dt.tz_localize(None)
return datetime_ser
def resample_reindex(df, start_date, end_date, time_scale):
# resample to get mean at correct time scale
df_resamp = df.resample(time_scale).mean()
# get new index
date_index = pd.date_range(start=start_date, end=end_date,
freq=time_scale)
# make so the index goes from start to end regardless of actual data
# presence
df_reindexed = df_resamp.reindex(date_index)
return df_reindexed
def delete_non_approved_data(df):
"""
disregard the data that do not have the "approved" tag in the qualifier
column
:param df: dataframe with qualifiers
:return: dataframe with just the values that are approved
"""
# first I have to get the actual qualifiers. originally, these are lists
# in a column in the df (e.g., [A, [91]]
# todo: what does the number mean (i.e., [91])
qualifiers_list = df['qualifiers'].to_list()
qualifiers = [q[0] for q in qualifiers_list]
# check qualifier's list
if qualifiers[0] not in ['A', 'P']:
print("we have a weird qualifier. it is ", qualifiers[0])
qualifier_ser = pd.Series(qualifiers, index=df.index)
approved_indices = (qualifier_ser == 'A')
approved_df = df[approved_indices]
return approved_df
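# Toy example (values invented) of what the filter keeps: rows whose first
# qualifier is 'A' (approved) survive, while 'P' (provisional) rows are dropped.
# _demo = pd.DataFrame({"value": ["1.0", "2.0"], "qualifiers": [["A"], ["P"]]})
# delete_non_approved_data(_demo)  # -> only the first row remains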
def format_df(ts_df, site_code, start_date, end_date, time_scale,
only_approved=True):
"""
format unformatted dataframe. this includes setting a datetime index,
resampling, reindexing to the start and end date,
renaming the column to the site code, removing the qualifier column and
optionally screening out any data points that are not approved
:param ts_df: (dataframe) unformatted time series dataframe from nwis json
data
:param site_code: (str) the site_code of the site (taken from json data)
:param start_date: (str) start date of call
:param end_date: (str) end date of call
:param time_scale: (str) time scale in which you want to resample and at
which your new index will be. should be a code (i.e., 'H' for hourly)
:param only_approved: (bool) whether or not to screen out non-approved data
points
:return: formatted dataframe
"""
# convert datetime
ts_df['dateTime'] = format_dates(ts_df['dateTime'])
ts_df.set_index('dateTime', inplace=True)
if only_approved:
# get rid of any points that were not approved
ts_df = delete_non_approved_data(ts_df)
# delete qualifiers column
del ts_df['qualifiers']
# rename the column from 'value' to the site_code
ts_df = ts_df.rename(columns={'value': site_code})
# make the values numeric
ts_df[site_code] = pd.to_numeric(ts_df[site_code])
ts_df = resample_reindex(ts_df, start_date, end_date, time_scale)
return ts_df
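# Usage sketch (site code and dates are placeholders): turn one site's raw NWIS
# records into an hourly, approved-only series named after the site.
# hourly = format_df(raw_site_df, "01646500", "2019-01-01", "2019-02-01", "H")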
def nwis_json_to_df(json_data, start_date, end_date, time_scale='H'):
"""
combine time series in json produced by nwis web from multiple sites into
one pandas df. the df is also resampled to a time scale and reindexed so
the dataframes are from the start date to the end date regardless of
whether there is data available or not
"""
df_collection = []
time_series = json_data['value']['timeSeries']
for ts in time_series:
site_code = ts['sourceInfo']['siteCode'][0]['value']
print('processing the data for site ', site_code, flush=True)
# this is where the actual data is
ts_data = ts['values'][0]['value']
if ts_data:
ts_df = | pd.DataFrame(ts_data) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = concat([Series(dtype=dtype), | Series(dtype=dtype2) | pandas.Series |
import pandas
vaha = pandas.read_csv("vaha.txt", encoding="utf-8", sep="\t")
dny = vaha["den"]
cislo_dne = dny.str[3:]
cislo_dne = cislo_dne.str.replace(".", "", regex=False)  # strip the literal dot, not a regex
cislo_dne = | pandas.to_numeric(cislo_dne) | pandas.to_numeric |
# noinspection PyPackageRequirements
import datawrangler as dw
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.io as pio
import plotly.graph_objects as go
import matplotlib as mpl
from ..core import get_default_options, eval_dict, get, fullfact
defaults = eval_dict(get_default_options()['plot'])
def match_color(img, c):
img = np.atleast_2d(img)
c = np.atleast_2d(c)
if np.ndim(img) == 3:
all_inds = np.squeeze(np.zeros_like(img)[:, :, 0])
else:
all_inds = np.squeeze(np.zeros_like(img[:, 0]))
for i in range(c.shape[0]):
# noinspection PyShadowingNames
inds = np.zeros_like(img)
for j in range(c.shape[1]):
if np.ndim(img) == 3:
inds[:, :, j] = np.isclose(img[:, :, j], c[i, j])
else:
inds[:, j] = np.isclose(img[:, j], c[i, j])
all_inds = (all_inds + (np.sum(inds, axis=np.ndim(img) - 1) == c.shape[1])) > 0
return np.where(all_inds)
def group_mean(x):
@dw.decorate.apply_unstacked
def helper(y):
return pd.DataFrame(y.mean(axis=0)).T
means = helper(x)
if hasattr(means.index, 'levels'):
n_levels = len(means.index.levels)
if n_levels > 1:
index = pd.MultiIndex.from_frame(means.index.to_frame().iloc[:, :-1])
return | pd.DataFrame(data=means.values, columns=means.columns, index=index) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
"""
Created on Sat Oct 24 14:52:17 2020
@author: kirksmi
"""
from sklearn.metrics import confusion_matrix
import xgboost
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import (accuracy_score, f1_score, recall_score,
matthews_corrcoef, precision_score,
roc_curve, auc)
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
import warnings
import seaborn as sns
import copy
from sklearn.utils import class_weight
from sklearn.tree import DecisionTreeClassifier
from itertools import compress
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
# from dtreeviz.trees import *
# os.environ["PATH"] += os.pathsep + r"C:\\Users\\kirksmi\\anaconda3\\envs\\env\\lib\\site-packages\\graphviz"
# import graphviz
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.under_sampling import NearMiss
import math
from sklearn.model_selection import GridSearchCV
import shap
from collections import Counter
from matplotlib.lines import Line2D
import matplotlib.colors as mcol
import matplotlib.cm as pltcm
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from matplotlib.ticker import FormatStrFormatter
import time
import sys
import matplotlib.cm as cm
shap.initjs()
# add library module to PYTHONPATH
sys.path.append(f"{os.getcwd()}/../")
def prune(tree):
'''
This function will get rid of repetitive branches in decision trees
which lead to the same class prediciton.
Function written by GitHub user davidje13 (https://github.com/scikit-learn/scikit-learn/issues/10810)
Function Inputs:
---------
tree: decision tree classifier
Function Outputs:
---------
tree: pruned decision tree classifier
'''
tree = copy.deepcopy(tree)
dat = tree.tree_
nodes = range(0, dat.node_count)
ls = dat.children_left
rs = dat.children_right
classes = [[list(e).index(max(e)) for e in v] for v in dat.value]
leaves = [(ls[i] == rs[i]) for i in nodes]
LEAF = -1
for i in reversed(nodes):
if leaves[i]:
continue
if leaves[ls[i]] and leaves[rs[i]] and classes[ls[i]] == classes[rs[i]]:
ls[i] = rs[i] = LEAF
leaves[i] = True
return tree
def prune_index(inner_tree, index, threshold):
'''
This function will traverse a decision tree and remove any leaves with
a count class less than the given threshold.
Function written by <NAME>
(https://stackoverflow.com/questions/49428469/pruning-decision-trees)
Function Inputs:
---------
inner_tree: tree object (.tree_) from decision tree classifier
index: where to start pruning tree from (0 for the root)
threshold: minimum class count in leaf
'''
if inner_tree.value[index].min() < threshold:
# turn node into a leaf by "unlinking" its children
inner_tree.children_left[index] = TREE_LEAF
inner_tree.children_right[index] = TREE_LEAF
# if there are children, visit them as well
if inner_tree.children_left[index] != TREE_LEAF:
prune_index(inner_tree, inner_tree.children_left[index], threshold)
prune_index(inner_tree, inner_tree.children_right[index], threshold)
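# Usage sketch (the fitted tree below is hypothetical, not created in this module):
# both helpers expect an already-fitted sklearn DecisionTreeClassifier.
# clf = DecisionTreeClassifier(max_depth=4).fit(X_train, y_train)
# pruned_clf = prune(clf)                  # drops sibling leaves that predict the same class
# prune_index(clf.tree_, 0, threshold=5)   # in place: collapses nodes with a class count < 5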
def make_confusion_matrix(y_true=None,
y_pred=None,
cf=None,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=(8, 6),
cmap='Blues',
title=None):
'''
This function will make a pretty plot of an sklearn Confusion Matrix
using a Seaborn heatmap visualization.
Basis of function from https://medium.com/@dtuk81/confusion-matrix-visualization-fc31e3f30fea
Function Inputs:
---------
y_true: Array of experimental class labels
y_pred: Array of predicted class labels
cf: Can input pre-made confusion matrix rather than make one using y_true and y_pred.
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
    count: If True, show the raw counts in the confusion matrix. Default is True.
    percent: If True, show the percentage of total observations in each cell. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
if cf is None:
cf = confusion_matrix(y_true, y_pred)
blanks = ['' for i in range(cf.size)]
hfont = {'fontname': 'Arial'}
if group_names and len(group_names) == cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(
value) for value in cf.flatten()/np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(
group_labels, group_counts, group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0], cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
# if it is a binary or multi-class confusion matrix
if len(categories) == 2:
avg = "binary"
else:
avg = "macro"
if y_true is None:
diag = np.diagonal(cf)
accuracy = sum(diag) / cf.sum()
recalls = []
precisions = []
f1s = []
mccs = []
for i in range(len(categories)):
nums = [*range(len(categories))]
nums.remove(i)
# print("Current nums: ", nums)
TP = diag[i]
FN = sum(cf[i, nums])
TN = np.delete(np.delete(cf, i, 0), i, 1).sum()
FP = sum(cf[nums, i])
r = TP/(TP+FN)
p = TP / (TP+FP)
f = 2*(r*p)/(r+p)
m = (TP*TN-FP*FN)/math.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
recalls.append(r)
precisions.append(p)
f1s.append(f)
mccs.append(m)
recall = np.mean(recalls)
precision = np.mean(precisions)
f1 = np.mean(f1s)
mcc = np.mean(mccs)
else:
accuracy = accuracy_score(y_true, y_pred)
precision = precision_score(y_true, y_pred, average=avg)
recall = recall_score(y_true, y_pred, average=avg)
f1 = f1_score(y_true, y_pred, average=avg)
mcc = matthews_corrcoef(y_true, y_pred)
# r = np.corrcoef(y_true, y_pred)[0, 1]
if sum_stats:
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}\nMCC={:0.3f}".format(
accuracy, precision, recall, f1, mcc)
else:
stats_text = ""
if xyticks == False:
# Do not show categories if xyticks is False
categories = False
if categories == 'auto':
        categories = range(cf.shape[0])
# MAKE THE HEATMAP VISUALIZATION
fig, ax = plt.subplots(figsize=figsize)
sns.set(font="Arial")
ax = sns.heatmap(cf, annot=box_labels, fmt="",
cmap=cmap, cbar=cbar,
annot_kws={"size": 28}) # 22
ax.set_yticklabels(labels=categories, rotation=90, va="center",
fontsize=24, **hfont)
ax.set_xticklabels(labels=categories,
fontsize=24, **hfont) # 20
# FORMATTING THE CONFUSION MATRIX LABELS/TEXT
# if labels, put stats to right of CM
if xyplotlabels:
plt.ylabel('True label', fontweight='bold', **hfont)
plt.xlabel('Predicted label' + stats_text, fontweight='bold', **hfont)
elif cbar: # show color bar on right and stats below
plt.xlabel(stats_text, fontsize=15, **hfont)
else: # no color or labels, so put stats on right
ax.yaxis.set_label_position("right")
ax.yaxis.set_label_coords(1.25, 0.75)
plt.ylabel(stats_text, fontsize=18, rotation=0, **hfont) # labelpad=75
if title:
plt.title(title, **hfont)
plt.tight_layout()
return ax
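# Usage sketch (labels are placeholders): plot and save a confusion matrix for a
# 3-class problem directly from experimental vs. predicted labels.
# ax = make_confusion_matrix(y_true=y_test, y_pred=y_hat,
#                            categories=["Phos", "Unreg", "Acetyl"],
#                            cbar=False, sum_stats=True)
# plt.savefig("confusion_matrix.png", dpi=600, bbox_inches="tight")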
def train_model(X, y, condition="CAROM", num_folds=5, class_names=None,
depth="deep", imbalance="none", pos_label=1,
random_seed=123, num_iter=25,
fig_path='../figures/crossval/'):
'''
This function trains an XGBoost model for predicting post-translational
modifications (the target array) given a features matrix.
For binary models, the pos_label argument can be used to designate the
class of interest.
For multi-class models, it is assumed that the middle class is Unregulated
and the lower/upper classes mark Phos and Acetyl, respectively
(e.g -1=Phos, 0=Unreg, 1=Acetyl)
The function uses RandomizedGridSearch within cross-validation to tune
the XGBoost hyperparameters and estimate model performance. The final model
uses the hyperparameters from the best-scoring CV fold and is trained on
the entire dataset.
Function Inputs:
---------
1. X: Predictor features matrix
2. y: Target variable
3. condition: String used to identify dataset condition (e.g "e.coli").
Used to name/save plots.
4. num_folds: Number of cross-validation folds
5. class_names: Names of target classes
6. depth: Determines the range of values for tuning the max_depth
hyperparameter. Options are "deep" (default) or "shallow"
7. imbalance Determines the method for addressing class imbalance:
a.) List of two floats (for multi-class) or single float
(for binary): Model will use SMOTE oversampling for the
Phos/Acetyl classes. This assumes that Unregulated is
largest class. Ex: [0.5, 0.75] --> Phos class will be
over-sampled to 50% of size of Unreg, Acetyl to 75% of
Unreg.
b.) "adasyn": uses Adasyn over-sampling method. Phos and
Acetyl classes over-sampled to 75% of Unreg class.
c.) "undersample": undersamples larger classes to size of
smallest class.
d.) "none" (default): class balances are not adjusted
e.) "balanced": inverse proportion of classes are used to
assign class weights for "sample_weights" argument in
hyperparameter tuning
8. pos_label: Used to identify class of interest for binary problems. Does
not affect multi-class problems.
9. random_seed: Random seed used for cross-validation splits and hyperparameter
tuning.
10. num_iter: Number of iterations to run for each fold of RandomizedGridSearch.
11. fig_path: Directory to save figures. Default is '../figures/crossval/'
Function Outputs:
---------
1. XGBoost model (fitted to entire dataset)
2. Dataframe w/ XGBoost cross-val scores
'''
start = time.time()
# define font type for plots
pltFont = {'fontname': 'Arial'}
# define feature names
feat_names = X.columns
# transform Target classes to 0, 1, 2 (this helps XGBoost)
le = preprocessing.LabelEncoder()
le.fit(y)
y = pd.Series(le.transform(y))
# if class names not given, use class integers
if class_names is None:
class_names = []
for cl in y.unique():
class_names.append(np.array2string(cl))
num_class = len(np.unique(y))
print("Number of class: {}".format(num_class))
# hyperparameters to tune
# (max_depth adjusted based on 'depth' argument)
if depth == "shallow":
params = {
"learning_rate": [0.01, 0.05, 0.1, 0.3],
"max_depth": range(3, 8, 1), # range(4,11,2),
"min_child_weight": [3, 5, 7, 10],
"subsample": [0.7, 0.8, 0.9],
"colsample_bytree": [0.7, 0.8, 0.9],
}
elif depth == "deep":
params = {
"learning_rate": [0.01, 0.05, 0.1, 0.3],
"max_depth": range(4, 11, 2),
"min_child_weight": [3, 5, 7],
"subsample": [0.8, 0.9],
"colsample_bytree": [0.8, 0.9]
}
##### Cross-validation training #####
# Define classifiers and hyperparameter search, based on binary vs multi-class problem
if num_class == 2:
print("TRAINING BINARY MODEL!")
# define classifier and hyperparameter tuning
classifier = xgboost.XGBClassifier(objective='binary:logistic',
n_estimators=150,
use_label_encoder=False,
eval_metric='logloss',
random_state=random_seed)
random_search = RandomizedSearchCV(classifier, param_distributions=params,
n_iter=num_iter, scoring='f1',
n_jobs=-1, cv=num_folds, verbose=3,
random_state=random_seed)
avg = "binary"
elif num_class > 2:
print("TRAINING MULTI-CLASS MODEL!")
classifier = xgboost.XGBClassifier(objective='multi:softmax',
n_estimators=150,
use_label_encoder=False,
num_class=num_class,
eval_metric='mlogloss',
random_state=random_seed) # multi:softmax
random_search = RandomizedSearchCV(classifier, param_distributions=params,
n_iter=num_iter, scoring='f1_macro',
n_jobs=-1, cv=num_folds, verbose=3,
random_state=random_seed)
avg = "macro"
# Stratified cross-val split
cv = StratifiedKFold(n_splits=num_folds,
shuffle=True,
random_state=random_seed)
# create empty lists to store CV scores, confusion mat, etc.
acc_list = []
recall_list = []
precision_list = []
f1_list = []
mcc_list = []
auc_list = []
r_list = []
y_test = []
y_pred = []
cmCV = np.zeros((num_class, num_class))
paramDict = {}
count = 0
# loop through cross-val folds
for train_index, test_index in cv.split(X, y):
print("\n Cross-val Fold # {} \n".format(count))
X_trainCV, X_testCV = X.iloc[train_index], X.iloc[test_index]
y_trainCV, y_testCV = y.iloc[train_index], y.iloc[test_index]
# train and fit model according to the desired class imbalance method
if isinstance(imbalance, (list, float)):
class_values = y_trainCV.value_counts()
if num_class > 2:
smote_dict = {0: int(round(class_values[1]*imbalance[0])),
1: class_values[1],
2: int(round(class_values[1]*imbalance[1]))}
else:
smote_dict = {0: class_values[0],
1: int(round(class_values[0]*imbalance))}
print(smote_dict)
oversample = SMOTE(sampling_strategy=smote_dict)
X_trainCV, y_trainCV = oversample.fit_resample(
X_trainCV, y_trainCV)
random_search.fit(X_trainCV, y_trainCV)
elif imbalance == "adasyn":
class_values = y_trainCV.value_counts()
smote_dict = {0: int(round(class_values[1]*0.75)),
1: class_values[1],
2: int(round(class_values[1]*0.75))}
ada = ADASYN(sampling_strategy=smote_dict,
random_state=random_seed, n_neighbors=10)
X_trainCV, y_trainCV = ada.fit_resample(X_trainCV, y_trainCV)
random_search.fit(X_trainCV, y_trainCV)
elif imbalance == "undersample":
nr = NearMiss()
            X_trainCV, y_trainCV = nr.fit_resample(X_trainCV, y_trainCV)
random_search.fit(X_trainCV, y_trainCV)
elif imbalance == "none":
random_search.fit(X_trainCV, y_trainCV)
elif imbalance == "balanced":
weights = class_weight.compute_sample_weight("balanced", y_trainCV)
random_search.fit(X_trainCV, y_trainCV,
sample_weight=weights)
# get best estimator from random search
randomSearch_mdl = random_search.best_estimator_
# tune gamma and get new best estimator
params_gamma = {'gamma': [0, 0.1, 0.3, 0.5]}
gamma_search = GridSearchCV(estimator=randomSearch_mdl,
param_grid=params_gamma,
scoring='f1_macro',
n_jobs=-1, cv=3)
gamma_search.fit(X_trainCV, y_trainCV)
best_Mdl = gamma_search.best_estimator_
# print and store best params for current fold
print("Model Params: \n {}".format(best_Mdl))
paramDict[count] = best_Mdl.get_params
# make model predictions on X_testCV and store results
y_predCV = best_Mdl.predict(X_testCV)
y_proba = best_Mdl.predict_proba(X_testCV)
y_test.extend(y_testCV)
y_pred.extend(y_predCV)
cm = confusion_matrix(y_testCV, y_predCV)
print("current cm: \n", cm)
        cmCV = cmCV + cm  # update overall confusion matrix
print("Combined cm: \n", cmCV)
# calculate auc
if num_class > 2:
mean_auc = plot_roc(best_Mdl,
X_trainCV, y_trainCV,
X_testCV, y_testCV,
class_names=class_names,
pos_class=pos_label,
figsize=(8, 6),
show=False)[0]
# fig.savefig("../figures/crossval/{}_XGBcrossval_ROC{}.png".format(condition, count),
# dpi=600)
else:
preds = y_proba[:, pos_label]
fpr, tpr, threshold = roc_curve(y_testCV, preds)
mean_auc = auc(fpr, tpr)
# calculate classification scores and store
accuracy = accuracy_score(y_testCV, y_predCV)
f1 = f1_score(y_testCV, y_predCV,
average=avg, pos_label=pos_label)
recall = recall_score(y_testCV, y_predCV,
average=avg, pos_label=pos_label)
precision = precision_score(y_testCV, y_predCV,
average=avg, pos_label=pos_label)
mcc = matthews_corrcoef(y_testCV, y_predCV)
r = np.corrcoef(y_testCV, y_predCV)[0, 1]
acc_list.append(accuracy)
recall_list.append(recall)
precision_list.append(precision)
f1_list.append(f1)
mcc_list.append(mcc)
auc_list.append(mean_auc)
r_list.append(r)
count = count+1
# print final confusion mat
print("final CV confusion matrix: \n", cmCV)
# plot confusion matrix results
path = fig_path
try:
os.makedirs(path)
except OSError:
print("Directory already created")
make_confusion_matrix(y_test, y_pred, figsize=(8, 6), categories=class_names,
xyplotlabels=True, cbar=False, sum_stats=False)
plt.ylabel("Experimental Labels", fontsize=24)
plt.xlabel("Predicted Labels", fontsize=24)
plt.tight_layout()
plt.savefig(path+"/{}_XGBcrossval_confusionMat.png".format(condition),
dpi=600)
plt.show()
# get average scores
Accuracy = np.mean(acc_list)
F1 = np.mean(f1_list)
Precision = np.mean(precision_list)
Recall = np.mean(recall_list)
MCC = np.mean(mcc_list)
AUC = np.mean(auc_list)
Corr = np.mean(r_list)
scores = [Accuracy, Recall, Precision, F1, MCC, AUC, Corr]
# get stats for CV scores
loop_scores = {'Accuracy': acc_list,
'Recall': recall_list,
'Precision': precision_list,
'F1': f1_list,
'MCC': mcc_list,
'AUC': auc_list,
'R': r_list}
df_loop_scores = pd.DataFrame(loop_scores)
print("Model score statistics: ")
loop_stats = df_loop_scores.describe()
print(loop_stats)
# plot CV scores
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams['xtick.major.pad'] = '10'
fig, ax = plt.subplots(figsize=(8, 6))
ax.bar(df_loop_scores.columns, scores,
yerr=loop_stats.loc['std', :],
align='center',
alpha=0.5,
ecolor='black',
capsize=10,
width=0.8)
ax.set_ylim([0, 1.0])
plt.yticks(**pltFont)
ax.set_xticks(df_loop_scores.columns)
ax.set_xticklabels(df_loop_scores.columns, **pltFont,
rotation=45, ha="right", rotation_mode="anchor")
ax.tick_params(axis='both', which='major', labelsize=24)
ax.yaxis.grid(True)
plt.tight_layout()
plt.savefig(path+'/{}_XGB_crossVal_barGraph.png'.format(condition),
bbox_inches='tight', dpi=600)
plt.show()
# create dataframe with mean scores
data = {'Metric': ['Acc', 'Recall', 'Precision', 'F1', 'MCC', 'AUC', 'PearsonsR'],
'Scores': [Accuracy, Recall, Precision, F1, MCC, AUC, Corr]}
df_scores = | pd.DataFrame(data) | pandas.DataFrame |
import numpy as np
import pandas as pd
import theano
from utility import *
from collections import defaultdict
from itertools import permutations, combinations, combinations_with_replacement
import warnings
warnings.simplefilter(action = "ignore")
class SprolimData(object):
'''
wrapper for pandas DataFrame
creates theano shared variables for various columns in data and makes them
available as attributes in the same way a standard DataFrame does; converts
columns to integer indices prior to wrapping in shared variable
'''
def __init__(self, data, nltrs=None):
'''
Parameters
----------
data : pandas.DataFrame
A dataframe containing the following columns:
- annotatorid (optional)
- sentenceid
- predicate
- predpos
- gramfunc
- predpos
- argpos
- property
- applicable
- response
nltrs : pandas.DataFrame
A dataframe containing the following columns:
- predicate
- nltrs
'''
self._data = data
self._nltrs = nltrs
self._ident = ''.join([str(i) for i in np.random.choice(9, size=10)])
self._extract_data_attrs()
self._extract_predicate_list()
self._append_argument_info()
self._append_combined_columns()
self._indexify_response()
self._partition_data()
self._create_predicate_token_indices()
self._create_shared_variables()
self._set_counts()
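    # Usage sketch (the CSV name is a placeholder; any input must provide the
    # columns listed in the docstring above):
    # raw = pd.read_csv("protoroles.csv")
    # sdata = SprolimData(raw)                 # l-thematic role counts inferred per predicate
    # sdata = SprolimData(raw, nltrs=ltr_df)   # or supplied explicitly via `nltrs`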
def _extract_data_attrs(self):
self._nproperties = self._data.property.unique().shape[0]
self._nresponsetypes = (self._data.response-self._data.response.min()).max() + 1
self._ngramfuncs = get_num_of_types(self._data.gramfunc)
if 'annotatorid' in self._data.columns:
self._data['annotatorid'] = get_codes(self._data['annotatorid'])
self._nannotators = self._data.annotatorid.max() + 1
else:
self._data['annotatorid'] = 0
self._nannotators = 1
def _extract_predicate_list(self):
predicates = self._data.predicate.astype('category')
self._predicate_list = np.array(predicates.cat.categories)
gramfunc = self._data.gramfunc.astype('category')
self._ycats = np.array(gramfunc.cat.categories)
def _append_argument_info(self):
argcounter = lambda x: np.unique(x).shape[0]
h = ['sentenceid', 'predicate', 'predpos']
## append the number of arguments for each predicate token and the relative
## position of each argument relative to the predicate
self._data['nargs'] = self._data.groupby(h).argpos.transform(argcounter)
self._data['argposrel'] = self._data.groupby(h).argpos.transform(get_codes)
## append the number of l-thematic roles for each predicate
if self._nltrs is None:
self._nltrs = self._data.groupby('predicate').nargs.apply(np.max).reset_index()
self._data['nltrs'] = self._data.groupby('predicate').nargs.transform(np.max)
else:
self._data = | pd.merge(self._data, self._nltrs) | pandas.merge |
from copy import deepcopy
import numpy as np
import pandas as pd
import torch as t
import torch.nn as nn
from scipy import constants, linalg
from pyhdx.fileIO import dataframe_to_file
from pyhdx.models import Protein
from pyhdx.config import cfg
# TORCH_DTYPE = t.double
# TORCH_DEVICE = t.device('cpu')
class DeltaGFit(nn.Module):
def __init__(self, deltaG):
super(DeltaGFit, self).__init__()
#self.deltaG = deltaG
self.register_parameter(name='deltaG', param=nn.Parameter(deltaG))
def forward(self, temperature, X, k_int, timepoints):
"""
# inputs, list of:
temperatures: scalar (1,)
X (N_peptides, N_residues)
k_int: (N_peptides, 1)
"""
pfact = t.exp(self.deltaG / (constants.R * temperature))
uptake = 1 - t.exp(-t.matmul((k_int / (1 + pfact)), timepoints))
return t.matmul(X, uptake)
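# The forward pass above implements the uptake model used here: per-residue
# protection factors PF = exp(deltaG / (R * T)), intrinsic exchange rates k_int,
# residue-level uptake D_res(t) = 1 - exp(-(k_int / (1 + PF)) * t), and the
# coupling matrix X that sums residues into peptide-level uptake, D_calc = X @ D_res.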
def estimate_errors(hdxm, deltaG):
"""
Calculate covariances and uncertainty (perr, experimental)
Parameters
----------
hdxm : :class:`~pyhdx.models.HDXMeasurement`
deltaG : :class:`~numpy.ndarray`
Array with deltaG values.
Returns
-------
"""
dtype = t.float64
joined = pd.concat([deltaG, hdxm.coverage['exchanges']], axis=1, keys=['dG', 'ex'])
dG = joined.query('ex==True')['dG']
deltaG = t.tensor(dG.to_numpy(), dtype=dtype)
tensors = {k: v.cpu() for k, v in hdxm.get_tensors(exchanges=True, dtype=dtype).items()}
def hes_loss(deltaG_input):
criterion = t.nn.MSELoss(reduction='sum')
pfact = t.exp(deltaG_input.unsqueeze(-1) / (constants.R * tensors['temperature']))
uptake = 1 - t.exp(-t.matmul((tensors['k_int'] / (1 + pfact)), tensors['timepoints']))
d_calc = t.matmul(tensors['X'], uptake)
loss = criterion(d_calc, tensors['d_exp'])
return loss
hessian = t.autograd.functional.hessian(hes_loss, deltaG)
hessian_inverse = t.inverse(-hessian)
covariance = np.sqrt(np.abs(np.diagonal(hessian_inverse)))
cov_series = pd.Series(covariance, index=dG.index, name='covariance')
def jac_loss(deltaG_input):
pfact = t.exp(deltaG_input.unsqueeze(-1) / (constants.R * tensors['temperature']))
uptake = 1 - t.exp(-t.matmul((tensors['k_int'] / (1 + pfact)), tensors['timepoints']))
d_calc = t.matmul(tensors['X'], uptake)
residuals = (d_calc - tensors['d_exp'])
return residuals.flatten()
# https://stackoverflow.com/questions/42388139/how-to-compute-standard-deviation-errors-with-scipy-optimize-least-squares
jacobian = t.autograd.functional.jacobian(jac_loss, deltaG).numpy()
U, s, Vh = linalg.svd(jacobian, full_matrices=False)
tol = np.finfo(float).eps * s[0] * max(jacobian.shape)
w = s > tol
cov = (Vh[w].T / s[w] ** 2) @ Vh[w] # robust covariance matrix
res = jac_loss(deltaG).numpy()
chi2dof = np.sum(res ** 2) / (res.size - deltaG.numpy().size)
cov *= chi2dof
perr = np.sqrt(np.diag(cov))
perr_series = | pd.Series(perr, index=dG.index, name='perr') | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
        # check dtype explicitly to be sure
self.assertEqual(temp.dtype, expected_dtype)
        # .loc follows a different rule; temporarily disabled
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
        # check dtype explicitly to be sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
        # check dtype explicitly to be sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
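# Standalone illustration of the setitem coercions exercised by the class
# above.  Only version-stable cases are shown (object dtype and label-based
# enlargement); exact numeric upcasting rules differ across pandas versions.
import numpy as np
import pandas as pd

s = pd.Series(list('abcd'))                 # object dtype
s[1] = 1                                    # object + int -> still object
assert s.dtype == np.object_

s2 = pd.Series([1, 2, 3, 4], index=list('abcd'))
s2['x'] = 5                                 # new label enlarges the index
assert list(s2.index) == ['a', 'b', 'c', 'd', 'x']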
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
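# Standalone illustration of the Index.insert coercion rules tested above:
# inserting a value the index dtype cannot hold upcasts the result.
import numpy as np
import pandas as pd

idx = pd.Index([1, 2, 3, 4])
assert idx.insert(1, 10).dtype == np.int64      # int + int -> int
assert idx.insert(1, 1.1).dtype == np.float64   # int + float -> float
assert idx.insert(1, 'x').dtype == object       # int + object -> object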
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
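# Standalone illustration of the Series.where coercion rules tested above:
# replacement values that do not fit the original dtype upcast the result.
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3, 4])
cond = pd.Series([True, False, True, False])
out = s.where(cond, 1.1)
assert out.dtype == np.float64                  # int + float -> float
assert out.tolist() == [1.0, 1.1, 3.0, 1.1]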
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but placed here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => datetime64tz
# ToDo: must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + object => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
'x',
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_timedelta64(self):
pass
def test_fillna_series_period(self):
pass
def test_fillna_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01',
'2011-01-03', '2011-01-04'])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
1,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# datetime64 + object => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_index_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'], tz=tz)
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01',
'2011-01-03', '2011-01-04'], tz=tz)
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
1,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# datetime64tz + object => object
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
'x',
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_index_timedelta64(self):
pass
def test_fillna_index_period(self):
pass
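# Standalone illustration of the fillna coercion rules tested above: the fill
# value decides whether the original dtype is preserved.
import numpy as np
import pandas as pd

s = pd.Series([1.1, np.nan, 3.3])
filled = s.fillna(1)
assert filled.dtype == np.float64               # float + int -> float
assert filled.tolist() == [1.1, 1.0, 3.3]

obj = pd.Series(['a', np.nan, 'c'])
assert obj.fillna(1).dtype == object            # object dtype stays object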
class TestReplaceSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but placed here for consistency
klasses = ['series']
method = 'replace'
def setUp(self):
self.rep = {}
self.rep['object'] = ['a', 'b']
self.rep['int64'] = [4, 5]
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
def _assert_replace_conversion(self, from_key, to_key, how):
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
self.assertEqual(obj.dtype, from_key)
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
# buggy on windows for bool/int64
if (from_key == 'bool' and
to_key == 'int64' and
tm.is_platform_windows()):
pytest.skip("windows platform buggy: {0} -> {1}".format
(from_key, to_key))
if ((from_key == 'float64' and
to_key in ('bool', 'int64')) or
(from_key == 'complex128' and
to_key in ('bool', 'int64', 'float64')) or
(from_key == 'int64' and
to_key in ('bool')) or
# TODO_GH12747 The result must be int?
(from_key == 'bool' and to_key == 'int64')):
# buggy on 32-bit
if | tm.is_platform_32bit() | pandas.util.testing.is_platform_32bit |
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
#from torchvision import datasets
#from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
sentences=[
"what is the weather",
"the weather outside is frightful",
"i have a basketball game today",
"the weather is cold today",
"the weather is hot today",
"hot weather is not enjoyable",
"when is the weather going to get better",
"the basketball game was long",
"how much are the basketball tickets",
"what does the fox say"
]
tokens = [i.split() for i in sentences]
all_tokens=[]
for i in tokens:
all_tokens=all_tokens+i
all_tokens=list(set(all_tokens))
all_pairs1=[[[j[i],j[i+1]] for i in range(len(j)-1)] for j in tokens]
all_pairs2=[[[j[i+1],j[i]] for i in range(len(j)-1)] for j in tokens]
token_cooccur=all_pairs1+all_pairs2
token_cooccur[1]
all_pairs=[]
for i in token_cooccur:
for j in i:
all_pairs.append(j)
X= | pd.DataFrame() | pandas.DataFrame |
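# Sketch of a possible next step for a word-pair list like `all_pairs` above:
# tabulate co-occurrence counts with pandas (the toy pairs below are invented
# for illustration).
import pandas as pd

toy_pairs = [['the', 'weather'], ['weather', 'is'], ['the', 'weather'],
             ['is', 'cold'], ['weather', 'is']]
pairs_df = pd.DataFrame(toy_pairs, columns=['target', 'context'])
cooccurrence = pd.crosstab(pairs_df['target'], pairs_df['context'])
print(cooccurrence)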
"""Generate BSA database for sharded dataset."""
import multiprocessing as mp
import os
import timeit
import click
import pandas as pd
import parallel as par
import atom3d.datasets.ppi.bsa as bsa
import atom3d.datasets.ppi.neighbors as nb
import atom3d.shard.shard as sh
import atom3d.util.log as log
logger = log.get_logger('bsa')
db_sem = mp.Semaphore()
@click.command(help='Generate Buried Surface Area database for sharded.')
@click.argument('sharded_path', type=click.Path())
@click.argument('output_bsa', type=click.Path())
@click.option('-n', '--num_threads', default=8,
help='Number of threads to use for parallel processing.')
def bsa_db(sharded_path, output_bsa, num_threads):
sharded = sh.Sharded.load(sharded_path)
num_shards = sharded.get_num_shards()
dirname = os.path.dirname(output_bsa)
if dirname != '':
os.makedirs(dirname, exist_ok=True)
inputs = [(sharded, x, output_bsa) for x in range(num_shards)]
logger.info(f'{num_shards:} shards to do.')
logger.info(f'Using {num_threads:} threads')
par.submit_jobs(_bsa_db, inputs, num_threads)
def _bsa_db(sharded, shard_num, output_bsa):
logger.info(f'Processing shard {shard_num:}')
start_time = timeit.default_timer()
start_time_reading = timeit.default_timer()
shard = sharded.read_shard(shard_num)
elapsed_reading = timeit.default_timer() - start_time_reading
start_time_waiting = timeit.default_timer()
with db_sem:
start_time_reading = timeit.default_timer()
if os.path.exists(output_bsa):
curr_bsa_db = pd.read_csv(output_bsa).set_index(['ensemble'])
else:
curr_bsa_db = None
tmp_elapsed_reading = timeit.default_timer() - start_time_reading
elapsed_waiting = timeit.default_timer() - start_time_waiting - \
tmp_elapsed_reading
elapsed_reading += tmp_elapsed_reading
start_time_processing = timeit.default_timer()
all_results = []
cache = {}
for e, ensemble in shard.groupby('ensemble'):
if (curr_bsa_db is not None) and (e in curr_bsa_db.index):
continue
(name0, name1, _, _), (bdf0, bdf1, _, _) = nb.get_subunits(ensemble)
try:
            # We use the bound form for individual subunits in the BSA computation,
            # as the actual structure sometimes differs between bound and unbound.
if name0 not in cache:
cache[name0] = bsa._compute_asa(bdf0)
if name1 not in cache:
cache[name1] = bsa._compute_asa(bdf1)
result = bsa.compute_bsa(bdf0, bdf1, cache[name0], cache[name1])
result['ensemble'] = e
all_results.append(result)
        except AssertionError as err:
            logger.warning(err)
            logger.warning(f'Failed BSA on {e:}')
if len(all_results) > 0:
to_add = pd.concat(all_results, axis=1).T
elapsed_processing = timeit.default_timer() - start_time_processing
if len(all_results) > 0:
start_time_waiting = timeit.default_timer()
with db_sem:
start_time_writing = timeit.default_timer()
# Update db in case it has updated since last run.
if os.path.exists(output_bsa):
curr_bsa_db = pd.read_csv(output_bsa)
new_bsa_db = | pd.concat([curr_bsa_db, to_add]) | pandas.concat |
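# Standalone sketch of the "resume from a running CSV" pattern used above:
# keep an id-keyed results file, skip ids already present, and append only the
# new rows.  The file name and columns here are invented for the example.
import os
import pandas as pd

def append_results(output_csv, new_rows):
    """new_rows: DataFrame with an 'ensemble' column identifying each result."""
    if os.path.exists(output_csv):
        done = pd.read_csv(output_csv)
        new_rows = new_rows[~new_rows['ensemble'].isin(done['ensemble'])]
        combined = pd.concat([done, new_rows], ignore_index=True)
    else:
        combined = new_rows
    combined.to_csv(output_csv, index=False)

append_results('bsa_demo.csv', pd.DataFrame({'ensemble': ['a', 'b'], 'bsa': [1.0, 2.0]}))
append_results('bsa_demo.csv', pd.DataFrame({'ensemble': ['b', 'c'], 'bsa': [2.0, 3.0]}))
print(pd.read_csv('bsa_demo.csv'))  # 'b' is not duplicated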
# factor_alphas.py
# -------
# Writes a new CSV with Fama-French factor alphas
# 5 Factor: https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/Data_Library/f-f_5_factors_2x3.html
# 3 Factor: https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/Data_Library/f-f_factors.html
# Momentum Factor: https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/Data_Library/det_mom_factor.html
# SMB is defined differently for 5 Factor and 3 Factor data
import pandas as pd
import numpy as np
import statsmodels.formula.api as sm
crsp = pd.read_csv("crsp_full_month.csv", dtype={'date': np.object})
crsp.dropna(inplace=True)
crsp = crsp.drop_duplicates(subset=['date', 'TICKER'], keep=False)
crsp = crsp[['date', 'TICKER', 'RET']]
crsp = crsp[(crsp['RET'] != 'R') & (crsp['RET'] != 'C')]
crsp["RET"] = crsp["RET"].apply(pd.to_numeric)
crsp['date'] = crsp['date'].str.slice(0, 6)
# Generate (CAPM) 1 factor alphas, 3 factor alphas, 4 factor alphas
factors = pd.read_csv("factors3.csv", dtype={'DATE': np.object})
mom = pd.read_csv("factors_mom.csv", dtype={'DATE': np.object})
factors = factors.merge(mom, on='DATE')
factors.rename(columns={'DATE': 'date'}, inplace=True)
factors["Mkt"] = factors["Mkt-RF"] + factors["RF"]
factors = factors[["date", "SMB", "HML", "Mkt", "Mom"]]
crsp3 = crsp.merge(factors, on='date')
tickers = crsp3['TICKER'].unique().tolist()
out_f3 = | pd.DataFrame({"date": [], "TICKER": [], "ffalpha": []}) | pandas.DataFrame |
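# Hedged sketch of the per-ticker regression this script builds toward:
# regress a ticker's returns on the factor columns and read the intercept off
# as the factor alpha.  The data below is synthetic; the real loop would slice
# `crsp3` by ticker.
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm

rng = np.random.default_rng(0)
demo = pd.DataFrame({
    'RET': rng.normal(0.01, 0.05, 120),
    'Mkt': rng.normal(0.008, 0.04, 120),
    'SMB': rng.normal(0.0, 0.02, 120),
    'HML': rng.normal(0.0, 0.02, 120),
    'Mom': rng.normal(0.0, 0.03, 120),
})
fit = sm.ols('RET ~ Mkt + SMB + HML + Mom', data=demo).fit()
ffalpha = fit.params['Intercept']
print(round(ffalpha, 4))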
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
bank_data = pd.read_csv(path)
#Code starts here
bank=pd.DataFrame(bank_data)
categorical_var=bank.select_dtypes(include='object')
print(categorical_var.shape)
numerical_var=bank.select_dtypes(include='number')
print(numerical_var.shape)
bank.drop('Loan_ID',inplace=True,axis=1)
banks= | pd.DataFrame(bank) | pandas.DataFrame |
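# Sketch of the usual next step after splitting categorical and numerical
# columns: impute missing categorical values with the column mode and numeric
# values with the median (toy frame below; pandas' own mode() is used, though
# the scipy `mode` imported above would serve the same purpose).
import pandas as pd

demo = pd.DataFrame({'Gender': ['Male', None, 'Female', 'Male'],
                     'LoanAmount': [100.0, 120.0, None, 90.0]})
for col in demo.select_dtypes(include='object').columns:
    demo[col] = demo[col].fillna(demo[col].mode()[0])
for col in demo.select_dtypes(include='number').columns:
    demo[col] = demo[col].fillna(demo[col].median())
print(demo)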
""" OOI Object """
import datetime
import logging
import os
import re
import threading
import time
import warnings
from io import StringIO
from queue import Queue
import gevent
import pandas as pd
import pytz
import requests
import s3fs
import urllib3
import xarray as xr
from dateutil import parser
from lxml.html import fromstring as html_parser
from yodapy.datasources.ooi.CAVA import CAVA
from yodapy.datasources.ooi.helpers import set_thread
from yodapy.utils.conn import (
download_url,
fetch_url,
fetch_xr,
get_download_urls,
instrument_to_query,
perform_ek60_download,
perform_ek60_processing,
)
from yodapy.utils.files import CREDENTIALS_FILE
from yodapy.utils.parser import (
get_instrument_list,
get_nc_urls,
parse_annotations_json,
parse_deployments_json,
parse_global_range_dataframe,
parse_parameter_streams_dataframe,
parse_raw_data_catalog,
parse_streams_dataframe,
parse_toc_instruments,
unix_time_millis,
)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.basicConfig(
level=logging.INFO, format="(%(threadName)-10s) %(message)s"
)
logger = logging.getLogger(__name__)
print_lock = threading.Lock()
DATA_TEAM_GITHUB_INFRASTRUCTURE = "https://raw.githubusercontent.com/ooi-data-review/datateam-portal-backend/master/infrastructure"
FILE_SYSTEM = s3fs.S3FileSystem(anon=True)
BUCKET_DATA = "io2data/data"
class OOI(CAVA):
"""OOI Object for Ocean Observatories Initiative Data Retrieval.
Attributes:
ooi_name (str): Username for OOI API Data Access.
ooi_token (str): Token for OOI API Data Access.
source_name (str): Data source name.
regions (pandas.DataFrame): Table of OOI regions.
sites (pandas.DataFrame): Table of OOI sites.
instruments (pandas.DataFrame): Table of available instrument streams.
global_ranges (pandas.DataFrame): Table of global ranges for each instrument streams.
deployments (pandas.DataFrame): Table of deployments for filtered instrument streams.
annotations (pandas.DataFrame): Table of annotations for filtered instrument streams.
start_date (list): List of start dates requested.
end_date (list): List of end dates requested.
last_request (list): List of requested urls and parameters.
last_m2m_urls (list): List of requested M2M urls.
cava_arrays (pandas.DataFrame): Cabled array team Arrays vocab table.
cava_sites (pandas.DataFrame): Cabled array team Sites vocab table.
cava_infrastructures (pandas.DataFrame): Cabled array team Infrastructures vocab table.
cava_instruments (pandas.DataFrame): Cabled array team Instruments vocab table.
cava_parameters (pandas.DataFrame): Cabled array team Parameters vocab table.
"""
def __init__(
self, ooi_username=None, ooi_token=None, cloud_source=False, **kwargs
):
super().__init__()
self._source_name = "OOI"
self._start_date = None
self._end_date = None
# Private global variables
self._OOI_M2M_VOCAB = (
"https://ooinet.oceanobservatories.org/api/m2m/12586/vocab"
)
self._OOI_M2M_TOC = "https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/toc"
self._OOI_M2M_STREAMS = (
"https://ooinet.oceanobservatories.org/api/m2m/12575/stream"
)
self._OOI_DATA_URL = (
"https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv"
)
self._OOI_M2M_ANNOTATIONS = (
"https://ooinet.oceanobservatories.org/api/m2m/12580/anno/find"
)
self._OOI_M2M_DEPLOYMENT_QUERY = "https://ooinet.oceanobservatories.org/api/m2m/12587/events/deployment/query"
# From visualocean
self._OOI_VISUALOCEAN_M_STATS = (
"https://ooi-visualocean.whoi.edu/instruments/stats-monthly"
)
self._OOI_GLOBAL_RANGE = "https://raw.githubusercontent.com/ooi-integration/qc-lookup/master/data_qc_global_range_values.csv"
# From GitHub
self._OOI_PORTAL_REGIONS = (
f"{DATA_TEAM_GITHUB_INFRASTRUCTURE}/regions.csv"
)
self._OOI_PORTAL_SITES = f"{DATA_TEAM_GITHUB_INFRASTRUCTURE}/sites.csv"
# Not used
# self._OOI_VOCAB = 'https://raw.githubusercontent.com/ooi-integration/asset-management/master/vocab/vocab.csv'
self._regions = None
self._sites = None
# User inputs
self.ooi_username = ooi_username
self.ooi_token = ooi_token
# Private cache variables
self._rvocab = None
self._rglobal_range = None
self._rstreams = None
self._rtoc = None
self._raw_datadf = None
self._raw_data_url = None
# For bio-acoustic sonar
self._zplsc_data_catalog = None
self._raw_file_dict = None
self._data_type = None
self._current_data_catalog = None
self._filtered_data_catalog = None
self._q = None
self._raw_data = []
self._dataset_list = []
self._netcdf_urls = []
# Cloud copy
self._s3content = None
self._cloud_source = cloud_source
# ----------- Session Configs ---------------------
self._session = requests.Session()
self._pool_connections = kwargs.get("pool_connections", 100)
self._pool_maxsize = kwargs.get("pool_maxsize", 100)
self._adapter = requests.adapters.HTTPAdapter(
pool_connections=self._pool_connections,
pool_maxsize=self._pool_maxsize,
)
self._session.mount("https://", self._adapter)
self._session.verify = False
# --------------------------------------------------
self._request_urls = None
self._last_m2m_urls = []
self._last_download_list = None
self._last_downloaded_netcdfs = None
self._thread_list = []
self._setup()
@property
def regions(self):
""" Returns the OOI regions """
if not isinstance(self._regions, pd.DataFrame):
try:
self._regions = pd.read_csv(self._OOI_PORTAL_REGIONS).rename(
{
"reference_designator": "array_rd",
"name": "region_name",
},
axis="columns",
)
except Exception as e:
logger.error(e)
return self._regions
@property
def sites(self):
""" Returns the OOI sites """
if not isinstance(self._sites, pd.DataFrame):
try:
self._sites = (
pd.read_csv(self._OOI_PORTAL_SITES)
.dropna(subset=["longitude", "latitude"]) # noqa
.rename(
{
"reference_designator": "site_rd",
"name": "site_name",
},
axis="columns",
)
)
except Exception as e:
logger.error(e)
return self._sites
@property
def instruments(self):
def threads_alive(t):
return not t.is_alive()
if all(list(map(threads_alive, self._thread_list))):
""" Returns instruments dataframe """
if isinstance(self._filtered_data_catalog, pd.DataFrame):
return get_instrument_list(self._filtered_data_catalog)
if isinstance(self._current_data_catalog, pd.DataFrame):
return get_instrument_list(self._current_data_catalog)
else:
message = "Please wait while we fetch the metadata ..."
logger.info(message)
@property
def deployments(self):
""" Return instruments deployments """
instrument_list = self._current_data_catalog
if isinstance(self._filtered_data_catalog, pd.DataFrame):
instrument_list = self._filtered_data_catalog
if len(instrument_list) <= 50:
text = f"Fetching deployments from {len(instrument_list)} unique instrument streams..." # noqa
print(text) # noqa
logger.info(text)
dflist = [
self._get_deployments(inst)
for idx, inst in instrument_list.iterrows()
] # noqa
return pd.concat(dflist).reset_index(drop="index")
else:
raise Exception(
f"You have {len(instrument_list)} unique streams; too many to fetch deployments. Please filter by performing search."
) # noqa
@property
def annotations(self):
""" Return instruments annotations """
instrument_list = self._current_data_catalog
if isinstance(self._filtered_data_catalog, pd.DataFrame):
instrument_list = self._filtered_data_catalog
if len(instrument_list) <= 20:
text = f"Fetching annotations from {len(instrument_list)} unique instrument streams..." # noqa
print(text) # noqa
logger.info(text)
dflist = [
self._get_annotations(inst)
for idx, inst in instrument_list.iterrows()
] # noqa
return pd.concat(dflist).reset_index(drop="index")
else:
raise Exception(
f"You have {len(instrument_list)} unique streams; too many to fetch annotations. Please filter by performing search."
) # noqa
@property
def start_date(self):
""" Return requested start date(s) """
if isinstance(self._start_date, pd.Series):
return self._start_date
return "Start date(s) can't be found."
@property
def end_date(self):
""" Return requested end date(s) """
if isinstance(self._end_date, pd.Series):
return self._end_date
return "End date(s) can't be found."
@property
def source_name(self):
""" Return data source name """
return self._source_name
@property
def last_requests(self):
""" Return last request url and parameters """
if self._request_urls:
return self._request_urls
return "Data request has not been made."
@property
def last_m2m_urls(self):
""" Return last request m2m urls """
if self._last_m2m_urls:
return self._last_m2m_urls
return "Data request has not been made."
@property
def global_ranges(self):
""" Return global ranges """
return self._get_global_ranges()
def view_instruments(self):
"""
**DEPRECATED.**
Shows the current instruments requested.
Use OOI.instruments attribute instead.
Returns:
DataFrame: Pandas dataframe of the instruments.
"""
warnings.warn(
"The function view_instruments is deprecated. Please use OOI.instruments attribute instead.",
DeprecationWarning,
stacklevel=2,
)
return self.instruments
def view_regions(self):
"""
**DEPRECATED.**
Shows the regions within OOI.
Use OOI.regions attribute instead.
Returns:
DataFrame: Pandas dataframe of the regions.
"""
warnings.warn(
"The function view_regions is deprecated. Please use OOI.regions attribute instead.",
DeprecationWarning,
stacklevel=2,
)
return self.regions
def view_sites(self):
"""
**DEPRECATED.**
Shows the sites within OOI.
Use OOI.sites attribute instead.
Returns:
DataFrame: Pandas dataframe of the sites.
"""
warnings.warn(
"The function view_sites is deprecated. Please use OOI.sites attribute instead.",
DeprecationWarning,
stacklevel=2,
)
return self.sites
def __repr__(self):
""" Prints out the representation of the OOI object """
inst_text = "Instrument Stream"
if isinstance(self._current_data_catalog, pd.DataFrame):
data_length = len(
self._current_data_catalog.drop_duplicates(
subset=[
"reference_designator",
"stream_method",
"stream_rd",
]
)
)
else:
data_length = 0
if isinstance(self._filtered_data_catalog, pd.DataFrame):
data_length = len(
self._filtered_data_catalog.drop_duplicates(
subset=[
"reference_designator",
"stream_method",
"stream_rd",
]
)
)
if data_length > 1:
inst_text = inst_text + "s"
return (
f"<Data Source: {self._source_name} ({data_length} {inst_text})>"
) # noqa
def __len__(self):
""" Prints the length of the object """
if isinstance(self._filtered_data_catalog, pd.DataFrame):
return len(
self._filtered_data_catalog.drop_duplicates(
subset=[
"reference_designator",
"stream_method",
"stream_rd",
]
)
)
else:
return 0
def _setup(self):
""" Setup the OOI Instance by fetching data catalog ahead of time """
logger.debug("Setting UFrame credentials.")
if not self.ooi_username or not self.ooi_token:
self._use_existing_credentials()
# Check if ooinet is available
try:
req = requests.get("https://ooinet.oceanobservatories.org")
if req.status_code == 200:
threads = [
("get-data-catalog", self._get_data_catalog),
("get-global-ranges", self._get_global_ranges),
("get-rawdata-filelist", self._get_rawdata_filelist),
] # noqa
for t in threads:
ft = set_thread(*t)
self._thread_list.append(ft)
else:
logger.warning(
f"Server not available, please try again later: {req.status_code}"
)
except Exception as e:
logger.error(f"Server not available, please try again later: {e}")
# Retrieve datasets info in the s3 bucket.
try:
self._s3content = [
os.path.basename(rd) for rd in FILE_SYSTEM.ls(BUCKET_DATA)
]
except Exception as e:
logger.error(e)
def request_data(
self, begin_date, end_date, data_type="netcdf", limit=-1, **kwargs
):
"""
Request data for filtered instruments.
Args:
begin_date (str): Begin date of desired data in ISO-8601 Format.
end_date (str): End date of desired data in ISO-8601 Format.
data_type (str): Desired data type. Either 'netcdf' or 'json'.
limit (int, optional): Desired data points. Required for 'json' ``data_type``. Max is 20000.
**kwargs: Optional Keyword arguments. \n
**time_check** - set to true (default) to ensure the request times fall within the stream data availability \n
**exec_dpa** - boolean value specifying whether to execute all data product algorithms to return L1/L2 parameters (Default is True) \n
**provenance** - boolean value specifying whether provenance information should be included in the data set (Default is True) \n
**email** - provide email.
Returns:
            self: Modified OOI object. Use ``raw()`` to see either the data URLs (netcdf) or the JSON result (json).
"""
self._data_type = data_type
begin_dates = list(map(lambda x: x.strip(" "), begin_date.split(",")))
end_dates = list(map(lambda x: x.strip(" "), end_date.split(",")))
data_catalog_copy = self._filtered_data_catalog.copy()
self._q = Queue()
# Limit the number of request
if len(data_catalog_copy) > 6:
text = f"Too many instruments to request data for! Max is 6, you have {len(data_catalog_copy)}" # noqa
logger.error(text)
raise Exception(text)
if len(begin_dates) == 1 and len(end_dates) == 1:
begin_dates = begin_dates[0]
end_dates = end_dates[0]
elif len(begin_dates) != len(end_dates):
logger.warning(
"Please provide the same number of begin and end dates"
)
raise ValueError(
"Please provide the same number of begin and end dates"
)
else:
begin_dates = pd.Series(begin_dates)
end_dates = | pd.Series(end_dates) | pandas.Series |
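# Standalone sketch of the session setup used in the OOI class above: mounting
# an HTTPAdapter with a larger connection pool lets many small M2M requests
# reuse connections.  The URL below is only a placeholder.
import requests

session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
session.mount("https://", adapter)
# every call made through `session` now shares the pooled connections, e.g.
# resp = session.get("https://example.com/api/endpoint", timeout=30)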
# !/usr/bin/env python3
import sys
import os
import importlib
import pprint
import pandas as pd
import time
import datetime
sys.path.append(os.path.join(os.path.abspath(__file__ + "../../../")))
lfcli_base = importlib.import_module("py-json.LANforge.lfcli_base")
LFCliBase = lfcli_base.LFCliBase
pandas_extensions = importlib.import_module("py-json.LANforge.pandas_extensions")
port_probe = importlib.import_module("py-json.port_probe")
ProbePort = port_probe.ProbePort
class L3CXProfile(LFCliBase):
def __init__(self,
lfclient_host,
lfclient_port,
local_realm,
side_a_min_bps=256000,
side_b_min_bps=256000,
side_a_max_bps=0,
side_b_max_bps=0,
side_a_min_pdu=-1,
side_b_min_pdu=-1,
side_a_max_pdu=0,
side_b_max_pdu=0,
report_timer_=3000,
name_prefix_="Unset",
number_template_="00000",
mconn=0,
debug_=False):
"""
:param lfclient_host:
:param lfclient_port:
:param local_realm:
:param side_a_min_bps:
:param side_b_min_bps:
:param side_a_max_bps:
:param side_b_max_bps:
:param side_a_min_pdu:
:param side_b_min_pdu:
:param side_a_max_pdu:
:param side_b_max_pdu:
:param name_prefix_: prefix string for connection
:param number_template_: how many zeros wide we padd, possibly a starting integer with left padding
:param mconn: Multi-conn setting for this connection.
:param debug_:
"""
super().__init__(lfclient_host, lfclient_port, _debug=debug_)
self.debug = debug_
self.local_realm = local_realm
self.side_a_min_pdu = side_a_min_pdu
self.side_b_min_pdu = side_b_min_pdu
self.side_a_max_pdu = side_a_max_pdu
self.side_b_max_pdu = side_b_max_pdu
self.side_a_min_bps = side_a_min_bps
self.side_b_min_bps = side_b_min_bps
self.side_a_max_bps = side_a_max_bps
self.side_b_max_bps = side_b_max_bps
self.report_timer = report_timer_
self.created_cx = {}
self.created_endp = {}
self.name_prefix = name_prefix_
self.number_template = number_template_
self.mconn = mconn
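    # Example (sketch): a minimal construction, assuming ``realm`` is an existing
    # Realm object and the LANforge GUI listens on localhost:8080 (hypothetical
    # values):
    #
    #   profile = L3CXProfile('localhost', 8080, realm,
    #                         side_a_min_bps=256000, side_b_min_bps=256000,
    #                         name_prefix_="demo")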
def get_cx_count(self):
return len(self.created_cx.keys())
def get_cx_names(self):
return self.created_cx.keys()
def get_cx_report(self):
data = dict()
for cx_name in self.get_cx_names():
data[cx_name] = self.json_get("/cx/" + cx_name).get(cx_name)
return data
def __get_rx_values(self):
cx_list = self.json_get("endp?fields=name,rx+bytes")
if self.debug:
print(self.created_cx.values())
print("==============\n", cx_list, "\n==============")
cx_rx_map = {}
for cx_name in cx_list['endpoint']:
if cx_name != 'uri' and cx_name != 'handler':
for item, value in cx_name.items():
for value_name, value_rx in value.items():
if value_name == 'rx bytes' and item in self.created_cx.values():
cx_rx_map[item] = value_rx
return cx_rx_map
@staticmethod
def __compare_vals(old_list, new_list):
passes = 0
expected_passes = 0
if len(old_list) == len(new_list):
for item, value in old_list.items():
expected_passes += 1
if new_list[item] > old_list[item]:
passes += 1
if passes == expected_passes:
return True
else:
return False
else:
return False
def instantiate_file(self, file_name, file_format):
pass
def monitor(self,
duration_sec=60,
monitor_interval_ms=1,
sta_list=None,
layer3_cols=None,
port_mgr_cols=None,
created_cx=None,
report_file=None,
systeminfopath=None,
output_format=None,
script_name=None,
arguments=None,
compared_report=None,
debug=False):
if duration_sec:
duration_sec = self.parse_time(duration_sec).seconds
else:
raise ValueError("L3CXProfile::monitor wants duration_sec > 1 second")
if duration_sec <= monitor_interval_ms:
raise ValueError("L3CXProfile::monitor wants duration_sec > monitor_interval")
if report_file is None:
raise ValueError("Monitor requires an output file to be defined")
if systeminfopath is None:
raise ValueError("Monitor requires a system info path to be defined")
if created_cx is None:
raise ValueError("Monitor needs a list of Layer 3 connections")
if (monitor_interval_ms is None) or (monitor_interval_ms < 1):
raise ValueError("L3CXProfile::monitor wants monitor_interval >= 1 second")
if layer3_cols is None:
raise ValueError("L3CXProfile::monitor wants a list of column names to monitor")
if output_format:
if output_format.lower() != report_file.split('.')[-1]:
raise ValueError('Filename %s has an extension that does not match output format %s .' % (
report_file, output_format))
else:
output_format = report_file.split('.')[-1]
# default save to csv first
if report_file.split('.')[-1] != 'csv':
report_file = report_file.replace(str(output_format), 'csv', 1)
print("Saving rolling data into..." + str(report_file))
# ================== Step 1, set column names and header row
layer3_cols = [self.replace_special_char(x) for x in layer3_cols]
layer3_fields = ",".join(layer3_cols)
default_cols = ['Timestamp', 'Timestamp milliseconds epoch', 'Timestamp seconds epoch', 'Duration elapsed']
default_cols.extend(layer3_cols)
        # append 'alias' to port_mgr_cols if not already present; it is needed later
if port_mgr_cols:
if 'alias' not in port_mgr_cols:
port_mgr_cols.append('alias')
if port_mgr_cols:
default_cols.extend(port_mgr_cols)
header_row = default_cols
if port_mgr_cols:
port_mgr_cols = [self.replace_special_char(x) for x in port_mgr_cols]
port_mgr_cols_labelled = []
for col_name in port_mgr_cols:
port_mgr_cols_labelled.append("port mgr - " + col_name)
port_mgr_fields = ",".join(port_mgr_cols)
header_row.extend(port_mgr_cols_labelled)
# create sys info file
systeminfo = self.json_get('/')
sysinfo = [str("LANforge GUI Build: " + systeminfo['VersionInfo']['BuildVersion']),
str("Script Name: " + script_name), str("Argument input: " + str(arguments))]
with open(systeminfopath, 'w') as filehandle:
for listitem in sysinfo:
filehandle.write('%s\n' % listitem)
# ================== Step 2, monitor columns
start_time = datetime.datetime.now()
end_time = start_time + datetime.timedelta(seconds=duration_sec)
passes = 0
expected_passes = 0
old_cx_rx_values = self.__get_rx_values()
# wait 10 seconds to get proper port data
time.sleep(10)
# for x in range(0,int(round(iterations,0))):
initial_starttime = datetime.datetime.now()
timestamp_data = list()
while datetime.datetime.now() < end_time:
t = datetime.datetime.now()
timestamp = t.strftime("%m/%d/%Y %I:%M:%S")
t_to_millisec_epoch = int(self.get_milliseconds(t))
t_to_sec_epoch = int(self.get_seconds(t))
time_elapsed = int(self.get_seconds(t)) - int(self.get_seconds(initial_starttime))
stations = [station.split('.')[-1] for station in sta_list]
stations = ','.join(stations)
if port_mgr_cols:
port_mgr_response = self.json_get("/port/1/1/%s?fields=%s" % (stations, port_mgr_fields))
layer_3_response = self.json_get("/endp/%s?fields=%s" % (created_cx, layer3_fields))
new_cx_rx_values = self.__get_rx_values()
if debug:
print(old_cx_rx_values, new_cx_rx_values)
print("\n-----------------------------------")
print(t)
print("-----------------------------------\n")
expected_passes += 1
if self.__compare_vals(old_cx_rx_values, new_cx_rx_values):
passes += 1
else:
self.fail("FAIL: Not all stations increased traffic")
result = dict() # create dataframe from layer 3 results
if type(layer_3_response) is dict:
for dictionary in layer_3_response['endpoint']:
# if debug:
print('layer_3_data: %s' % dictionary)
result.update(dictionary)
else:
pass
layer3 = pd.DataFrame(result.values())
layer3.columns = ['l3-' + x for x in layer3.columns]
if port_mgr_cols: # create dataframe from port mgr results
result = dict()
if type(port_mgr_response) is dict:
print("port_mgr_response {pmr}".format(pmr=port_mgr_response))
if 'interfaces' in port_mgr_response:
for dictionary in port_mgr_response['interfaces']:
if debug:
print('port mgr data: %s' % dictionary)
result.update(dictionary)
elif 'interface' in port_mgr_response:
dict_update = {port_mgr_response['interface']['alias']: port_mgr_response['interface']}
if debug:
print(dict_update)
result.update(dict_update)
if debug:
print(result)
else:
print('interfaces and interface not in port_mgr_response')
exit(1)
portdata_df = pd.DataFrame(result.values())
print("portdata_df {pd}".format(pd=portdata_df))
portdata_df.columns = ['port-' + x for x in portdata_df.columns]
portdata_df['alias'] = portdata_df['port-alias']
layer3_alias = list() # Add alias to layer 3 dataframe
for cross_connect in layer3['l3-name']:
for port in portdata_df['port-alias']:
if port in cross_connect:
layer3_alias.append(port)
if len(layer3_alias) == layer3.shape[0]:
layer3['alias'] = layer3_alias
else:
raise ValueError("The Stations or Connection on LANforge did not match expected, \
Check if LANForge initial state correct or delete/cleanup corrects")
timestamp_df = pd.merge(layer3, portdata_df, on='alias')
else:
timestamp_df = layer3
probe_port_df_list = list()
for station in sta_list:
probe_port = ProbePort(lfhost=self.lfclient_host,
lfport=self.lfclient_port,
eid_str=station,
debug=self.debug)
probe_results = dict()
probe_port.refreshProbe()
probe_results['Signal Avg Combined'] = probe_port.getSignalAvgCombined()
probe_results['Signal Avg per Chain'] = probe_port.getSignalAvgPerChain()
probe_results['Signal Combined'] = probe_port.getSignalCombined()
probe_results['Signal per Chain'] = probe_port.getSignalPerChain()
if 'Beacon Av Signal' in probe_results.keys():
probe_results['Beacon Avg Signal'] = probe_port.getBeaconSignalAvg()
else:
probe_results['Beacon Avg Signal'] = "0"
# probe_results['HE status'] = probe_port.he
probe_results['TX Bitrate'] = probe_port.tx_bitrate
probe_results['TX Mbps'] = probe_port.tx_mbit
probe_results['TX MCS ACTUAL'] = probe_port.tx_mcs
if probe_port.tx_mcs:
probe_results['TX MCS'] = int(probe_port.tx_mcs) % 8
else:
probe_results['TX MCS'] = probe_port.tx_mcs
probe_results['TX NSS'] = probe_port.tx_nss
probe_results['TX MHz'] = probe_port.tx_mhz
if probe_port.tx_gi:
probe_results['TX GI ns'] = (probe_port.tx_gi * 10**9)
else:
probe_results['TX GI ns'] = probe_port.tx_gi
probe_results['TX Mbps Calc'] = probe_port.tx_mbit_calc
probe_results['TX GI'] = probe_port.tx_gi
probe_results['TX Mbps short GI'] = probe_port.tx_data_rate_gi_short_Mbps
probe_results['TX Mbps long GI'] = probe_port.tx_data_rate_gi_long_Mbps
probe_results['RX Bitrate'] = probe_port.rx_bitrate
probe_results['RX Mbps'] = probe_port.rx_mbit
probe_results['RX MCS ACTUAL'] = probe_port.rx_mcs
if probe_port.rx_mcs:
probe_results['RX MCS'] = int(probe_port.rx_mcs) % 8
else:
probe_results['RX MCS'] = probe_port.rx_mcs
probe_results['RX NSS'] = probe_port.rx_nss
probe_results['RX MHz'] = probe_port.rx_mhz
if probe_port.rx_gi:
probe_results['RX GI ns'] = (probe_port.rx_gi * 10**9)
else:
probe_results['RX GI ns'] = probe_port.rx_gi
probe_results['RX Mbps Calc'] = probe_port.rx_mbit_calc
probe_results['RX GI'] = probe_port.rx_gi
probe_results['RX Mbps short GI'] = probe_port.rx_data_rate_gi_short_Mbps
probe_results['RX Mbps long GI'] = probe_port.rx_data_rate_gi_long_Mbps
probe_df_initial = pd.DataFrame(probe_results.values()).transpose()
probe_df_initial.columns = probe_results.keys()
probe_df_initial.columns = ['probe ' + x for x in probe_df_initial.columns]
probe_df_initial['alias'] = station.split('.')[-1]
probe_port_df_list.append(probe_df_initial)
probe_port_df = | pd.concat(probe_port_df_list) | pandas.concat |
# -*- coding: utf-8 -*-
"""Test evaluator."""
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sktime.benchmarking.evaluation import Evaluator
from sktime.benchmarking.metrics import PairwiseMetric
from sktime.benchmarking.results import RAMResults
from sktime.series_as_features.model_selection import PresplitFilesCV
def dummy_results():
"""Results that are dummy."""
results = RAMResults()
results.cv = PresplitFilesCV()
results.save_predictions(
strategy_name="alg1",
dataset_name="dataset1",
index=np.array([1, 2, 3, 4]),
y_true=np.array([1, 1, 1, 1]),
y_pred=np.array([1, 1, 1, 1]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time= | pd.to_datetime(1605268801, unit="ms") | pandas.to_datetime |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
        # check whether the string can be converted to a single color
maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be converted to a color
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
            # ``colors`` is regarded as a color cycle.
            # mpl will raise an error if any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
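# Example (sketch): request five colors from a named matplotlib colormap
# (any colormap name known to matplotlib works; 'Greys' is used elsewhere in
# this module):
#
#   colors = _get_standard_colors(num_colors=5, colormap='Greys')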
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
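# Example (sketch): temporarily switch time series plotting to matplotlib-native
# x-axis handling via the 'x_compat' alias defined above, assuming ``df`` is an
# existing DataFrame with a DatetimeIndex:
#
#   with plot_params.use('x_compat', True):
#       df.plot()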
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j!= 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
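# Example (sketch): assuming ``df`` is a DataFrame whose 'Name' column holds
# class labels (e.g. the iris data set loaded elsewhere):
#
#   ax = radviz(df, 'Name')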
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
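# Example (sketch): assuming ``df`` is a DataFrame with a 'Name' class column:
#
#   ax = andrews_curves(df, 'Name', samples=200)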
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
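# Example (sketch): assuming ``s`` is a pandas Series of observations:
#
#   fig = bootstrap_plot(s, size=50, samples=500)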
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
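# Example (sketch): assuming ``s`` is a pandas Series (e.g. a time series):
#
#   ax = lag_plot(s, lag=1)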
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
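# Example (sketch): assuming ``s`` is a pandas Series:
#
#   ax = autocorrelation_plot(s)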
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
                # if we get an axis, the user should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
            # if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
            # if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if not label is None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if not leg is None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if not self.legend_title is None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
        return err
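    # Example (sketch): error values normally arrive here through the public
    # plotting API, e.g. assuming ``df`` is a DataFrame and ``errs`` is a dict,
    # DataFrame or array matching ``df``'s columns and length:
    #
    #   df.plot(kind='bar', yerr=errs)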
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
                raise ValueError('When stacked is True, each column must be either all positive or all negative. '
                                 '{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
            # column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
            # tsplot resampling may change the data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
if not 'color' in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log',False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
ax.set_xlabel('Frequency')
else:
for ax in self.axes:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots shows last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na(y)
bp = ax.boxplot(y, **kwds)
if self.return_type == 'dict':
return bp, bp
elif self.return_type == 'both':
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
return plotf
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid key '{0}' "
"The key must be either {1}".format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
plotf = self._get_plot_function()
if self.subplots:
self._return_obj = compat.OrderedDict()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=i, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [com.pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=0, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, y in self._iter_data()]
labels = [com.pprint_thing(l) for l in labels]
if not self.use_index:
labels = [com.pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
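# Illustrative sketch (not part of the original source) of how `return_type`
# changes what BoxPlot hands back through the public plotting entry points.
# `df` is a hypothetical numeric DataFrame.
#   >>> ax = df.plot(kind='box', return_type='axes')         # matplotlib Axes
#   >>> lines = df.plot(kind='box', return_type='dict')      # dict of box/whisker/median artists
#   >>> ax, lines = df.plot(kind='box', return_type='both')  # namedtuple BP(ax, lines)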
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,
'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,
'scatter': ScatterPlot, 'hexbin': HexBinPlot,
'area': AreaPlot, 'pie': PiePlot}
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if com.is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
com.is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
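# Minimal dispatch sketch (not from the original source): _plot looks the kind
# up in _plot_klass, builds the plot object, and drives generate()/draw().
# `df` is a hypothetical DataFrame with numeric columns 'a' and 'b'.
#   >>> _plot(df, x='a', y='b', kind='scatter')   # DataFrame-only kind
#   >>> _plot(df['a'], kind='kde')                # common kinds also accept a Series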
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
        is passed in. Be aware that passing in both an ax and sharex=True
        will alter all x axis labels for all axes in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind,
klass_coord=df_coord, klass_ax=df_ax,
klass_unique=df_unique, klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string
Title to use for the plot
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
layout : tuple (optional)
(rows, columns) for the layout of the plot
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique
subplots=False, sharex=None, sharey=False, layout=None, # Dataframe unique
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, # Dataframe unique
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
"""
If no axes is specified, check whether there are existing figures
If there is no existing figures, _gca() will
create a figure with the default figsize, causing the figsize=parameter to
be ignored.
"""
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
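# Series-level sketch (not from the original source): if a figure already
# exists, plot_series reuses its current axes via _gca(), so figsize is
# effectively ignored; `label` names the line for the legend. `s` is a
# hypothetical numeric Series.
#   >>> s.plot(kind='line', label='signal', legend=True)
#   >>> s.plot(kind='hist', bins=30)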
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {'axes', 'dict', 'both'}, default 'dict'
The kind of object to return. 'dict' returns a dictionary
whose values are the matplotlib Lines of the boxplot;
'axes' returns the matplotlib axes the boxplot is drawn on;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a dict mapping columns to ``return_type``
is returned.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [com.pprint_thing(x) for x in keys]
values = [remove_na(v) for v in values]
bp = ax.boxplot(values, **kwds)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
else:
ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if return_type is None:
msg = ("\nThe default value for 'return_type' will change to "
"'axes' in a future release.\n To use the future behavior "
"now, set return_type='axes'.\n To keep the previous "
"behavior and silence this warning, set "
"return_type='dict'.")
warnings.warn(msg, FutureWarning)
return_type = 'dict'
if ax is None:
ax = _gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwargs.setdefault('c', plt.rcParams['patch.facecolor'])
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
ax.grid(grid)
return fig
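# Usage sketch (not from the original source) for scatter_plot. `df` is a
# hypothetical DataFrame with numeric columns 'x0' and 'y0' and a grouping
# column 'g'.
#   >>> fig = scatter_plot(df, 'x0', 'y0', grid=True)
#   >>> fig = scatter_plot(df, 'x0', 'y0', by='g')   # one panel per group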
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
        is passed in. Be aware that passing in both an ax and sharex=True
will alter all x axis labels for all subplots in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
figsize : tuple
The size of the figure to create in inches by default
layout: (optional) a tuple (rows, columns) for the layout of the histograms
bins: integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
"""
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize,
sharex=sharex, sharey=sharey, layout=layout, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
    for i, col in enumerate(com._try_sort(data.columns)):  # api: pandas.core.common._try_sort
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# These unit tests are based upon the work done in Cyberpandas (see NOTICE):
# https://github.com/ContinuumIO/cyberpandas/blob/master/cyberpandas/test_ip_pandas.py
from pandas.core.internals import ExtensionBlock
import fletcher as fr
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
TEST_LIST = ["Test", "string", None]
TEST_ARRAY = pa.array(TEST_LIST, type=pa.string())
@pytest.fixture
def test_array_chunked():
chunks = []
for _ in range(10):
chunks.append(pa.array(TEST_LIST))
return pa.chunked_array(chunks)
# ----------------------------------------------------------------------------
# Block Methods
# ----------------------------------------------------------------------------
def test_concatenate_blocks():
v1 = fr.FletcherArray(TEST_ARRAY)
s = pd.Series(v1, index=pd.RangeIndex(3), fastpath=True)
result = pd.concat([s, s], ignore_index=True)
expected = pd.Series(
fr.FletcherArray(pa.array(["Test", "string", None, "Test", "string", None]))
)
tm.assert_series_equal(result, expected)
# ----------------------------------------------------------------------------
# Public Constructors
# ----------------------------------------------------------------------------
def test_series_constructor():
v = fr.FletcherArray(TEST_ARRAY)
result = pd.Series(v)
assert result.dtype == v.dtype
assert isinstance(result._data.blocks[0], ExtensionBlock)
def test_dataframe_constructor():
v = fr.FletcherArray(TEST_ARRAY)
df = pd.DataFrame({"A": v})
assert isinstance(df.dtypes["A"], fr.FletcherDtype)
assert df.shape == (3, 1)
# Test some calls to typical DataFrame functions
str(df)
df.info()
def test_dataframe_from_series_no_dict():
s = pd.Series(fr.FletcherArray(TEST_ARRAY))
result = pd.DataFrame(s)
expected = pd.DataFrame({0: s})
tm.assert_frame_equal(result, expected)
s = pd.Series(fr.FletcherArray(TEST_ARRAY), name="A")
result = pd.DataFrame(s)
    expected = pd.DataFrame({"A": s})  # api: pandas.DataFrame
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Union
import pandas as pd
from IPython.display import display
from functools import wraps
class GridFigure:
"""
    Lay out subplots on a grid view (matplotlib GridSpec).
"""
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
self.fig = plt.figure(figsize=(14, rows * 7))
self.gs = gridspec.GridSpec(rows, cols, wspace=0.4, hspace=0.3)
self.curr_row = 0
self.curr_col = 0
def next_row(self):
if self.curr_col != 0:
self.curr_row += 1
self.curr_col = 0
subplt = plt.subplot(self.gs[self.curr_row, :])
        self.curr_row += 1
        return subplt
def next_cell(self):
if self.curr_col >= self.cols:
self.curr_row += 1
self.curr_col = 0
subplt = plt.subplot(self.gs[self.curr_row, self.curr_col])
self.curr_col += 1
return subplt
def close(self):
plt.close(self.fig)
self.fig = None
self.gs = None
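# Usage sketch (not part of the original source): a 2x2 GridFigure where
# next_row() yields a full-width axes and next_cell() fills the grid
# left-to-right.
#   >>> gf = GridFigure(rows=2, cols=2)
#   >>> ax_wide = gf.next_row()                     # spans both columns
#   >>> ax1, ax2 = gf.next_cell(), gf.next_cell()
#   >>> gf.close()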
def customize(func):
"""
    Decorator that sets the content and style of the output figures.
"""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop("set_context", True)
if set_context:
color_palette = sns.color_palette("colorblind")
with plotting_context(), axes_style(), color_palette:
sns.despine(left=True)
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
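# Usage sketch (not part of the original source): decorating a plotting function
# so it runs inside the seaborn context/style configured below; callers can pass
# set_context=False to keep their own style. `my_returns` is a hypothetical
# pandas Series.
#   >>> @customize
#   ... def plot_returns(returns, ax=None):
#   ...     return returns.plot(ax=ax)
#   >>> plot_returns(my_returns)                      # styled
#   >>> plot_returns(my_returns, set_context=False)   # caller's own style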
def plotting_context(
context: str = "notebook", font_scale: float = 1.5, rc: dict = None
):
"""
    Create the default plotting context style.
    Parameters
    ---
    :param context: seaborn context name
    :param font_scale: font size scaling factor
    :param rc: dict of rc parameter overrides
"""
if rc is None:
rc = {}
rc_default = {"lines.linewidth": 1.5}
    # add the default settings for any keys that are not already set
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style: str = "darkgrid", rc: dict = None):
"""
    Create the default axes style.
    Parameters
    ---
    :param style: seaborn style name
    :param rc: dict of rc parameter overrides
"""
if rc is None:
rc = {}
rc_default = {}
for name, val in rc_default.items():
        rc.setdefault(name, val)
return sns.axes_style(style=style, rc=rc)
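# Combined usage sketch (not part of the original source): the two helpers above
# are thin wrappers and can be used directly as seaborn context managers.
#   >>> with plotting_context(font_scale=1.2), axes_style("whitegrid"):
#   ...     sns.despine(left=True)
#   ...     # draw plots here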
def print_table(
table: Union[pd.Series, pd.DataFrame], name: str = None, fmt: str = None
):
"""
    Set the display format of the output pandas DataFrame.
    Parameters
    ---
    :param table: input table
    :param name: name to assign to the table's column index
    :param fmt: display format for table elements, e.g. with '{0:.2f}%' the value 100 is shown as '100.00%'
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if isinstance(table, pd.DataFrame):
table.columns.name = name
    prev_option = pd.get_option("display.float_format")  # api: pandas.get_option
import urllib.request, time
import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from tkinter import Tk, messagebox
from pathlib import Path
from utils import *
from urllib.parse import unquote_plus
from bs4 import BeautifulSoup
def main():
CUR_DIR = Path(__file__).parent
PROGRAM = 'chromedriver.exe'
PATH = CUR_DIR / PROGRAM
URL_FINAL, SEARCH_TERM_FINAL, CLASS_TERM_FINAL, START_DATE_FINAL, END_DATE_FINAL, TIMEOUT, querCSV, querXLSX = GUI()
start_time = time.time()
OPTIONS = webdriver.ChromeOptions()
OPTIONS.add_argument('--headless')
OPTIONS.add_argument('--window-size=%s' % '1920,1080')
try:
DRIVER = webdriver.Chrome(PATH, options=OPTIONS)
DRIVER.get(URL_FINAL)
except WebDriverException:
        OPTIONS.binary_location = r'D:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
OPTIONS.add_experimental_option('excludeSwitches', ['enable-logging'])
DRIVER = webdriver.Chrome(PATH, options=OPTIONS)
DRIVER.get(URL_FINAL)
SEARCHBAR = DRIVER.find_element_by_id('iddadosConsulta.pesquisaLivre')
SEARCHBAR.send_keys(SEARCH_TERM_FINAL)
FROM = DRIVER.find_element_by_id('iddadosConsulta.dtInicio')
FROM.send_keys(START_DATE_FINAL)
TO = DRIVER.find_element_by_id('iddadosConsulta.dtFim')
TO.send_keys(END_DATE_FINAL)
CONSULT = DRIVER.find_element_by_id('pbSubmit')
#CLASS = DRIVER.find_element_by_id('classe_selectionText')
CLASS_SEARCH = DRIVER.find_element_by_id('botaoProcurar_classe')
CLASS_SEARCH.click()
try:
WebDriverWait(DRIVER, TIMEOUT).until(expected_conditions.presence_of_element_located((By.CLASS_NAME, 'treeView')))
CLASS_SEARCH_BAR = DRIVER.find_element_by_id('classe_treeSelectFilter')
CLASS_SEARCH_BUTTON = DRIVER.find_element_by_id('filtroButton')
CHECKBOX = DRIVER.find_element_by_id('classe_tree_node_8554')
CLASS_SEARCH_BAR.send_keys(CLASS_TERM_FINAL)
CLASS_SEARCH_BUTTON.click()
CHECKBOX.click()
CONFIRM = DRIVER.find_element_by_xpath('//*[@id="classe_treeSelectContainer"]/div[3]/table/tbody/tr/td/input[1]')
CONFIRM.click()
CONSULT.click()
try:
WebDriverWait(DRIVER, TIMEOUT).until(expected_conditions.presence_of_element_located((By.ID, 'divDadosResultado')))
PARSEURL = DRIVER.current_url
with urllib.request.urlopen(urllib.request.Request(PARSEURL, headers = {'User-Agent': 'Chrome'})) as HTML:
PAGE = HTML.read()
DRIVER.quit()
#PAGE = unquote_plus(str(PAGE))
TREE = BeautifulSoup(PAGE, 'lxml')
#HTML = TREE.prettify()
DATA = [[], [], [], [], [], [], [], []]
for TABLE in TREE.find_all('tr', class_ = 'fundocinza1'):
c=1
for LINE in TABLE.find_all('tr', class_ = 'fonte'):
for EXTRA in LINE.find_all('td', attrs = {'align': 'left', 'colspan': '2'}):
for PROCESS in LINE.find_all('span', class_ = 'fonteNegrito'):
DATA[0].append(tag_cleanup(PROCESS))
EXTRA.replaceWith('')
for LINE2 in LINE.find_all('td', attrs = {'align': 'left'}):
for CONTENT in LINE2.find_all('strong'):
CONTENT.replaceWith('')
DATA[c].append(tag_cleanup(LINE2)[1:])
c+=1
    df = pd.DataFrame()  # api: pandas.DataFrame
# imports
from os.path import join
import os
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy import interpolate
import analyze
from correction.correct import correct_z_by_xy_surface, correct_z_by_spline
from utils import plot_collections, bin, modify, plotting, fit, functions
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
"""
Blue: #0C5DA5
Green: #00B945
"""
plt.style.use(['science', 'ieee', 'std-colors'])
sci_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
sciblue = '#0C5DA5'
scigreen = '#00B945'
# --- file paths
base_dir = '/Users/mackenzie/Desktop/idpt_experiments/10.07.21-BPE_Pressure_Deflection_20X/analyses/results-04.06.22-min-temp-pos-and-neg/'
svp = base_dir + 'figs/'
rvp = base_dir + 'results/'
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
# READ FILES
# --- --- --- --- --- --- --- --- READ CALIBRATION DIAMETER PARAMETERS
theory_diam_path = base_dir + 'spct-calib-coords/calib_spct_pop_defocus_stats.xlsx'
# --- --- --- --- --- --- --- --- READ CALIBRATION IN-FOCUS COORDS
fpcal = base_dir + 'spct-calib-coords/calib_idpt_pid_defocus_stats_xy.xlsx'
# --- --- --- --- --- --- --- --- READ POSITIVE TEST COORDS
fdir = base_dir + 'test-coords/combined'
files = [f for f in os.listdir(fdir) if f.endswith('.xlsx')]
pids = [float(xf.split('z')[-1].split('um.xlsx')[0]) for xf in files]
pids = sorted(pids)
files = sorted(files, key=lambda x: float(x.split('z')[-1].split('um.xlsx')[0]))
# --- --- --- --- --- --- --- --- READ NEGATIVE TEST COORDS
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
# DEFINE FUNCTIONS
def flip_y(df, y0):
df['y'] = (df['y'] - 512) * -1 + y0
return df
def center_on_origin(df, z0=40, y0=25):
df['z'] = df['z'] - z0
df = flip_y(df, y0)
return df
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
# --- --- --- --- --- --- --- --- ANALYZE TEST COORDINATES
analyze_test = False
if analyze_test:
show_figs = False
save_figs = False
export_results = True
analyze_all_rows = True
plot_raw = False
plot_zcorr = False
analyze_per_particle_precision = True
save_plots, show_plots = False, False
fit_general_surface = True
save_fitted_surface = False
analyze_all_pids = False
analyze_rmsez_by_overlap = True
fit_pid_general_surface = True
show_percent_figs = False
save_percent_figs = False
fit_beam_theory = False
plot_fit_surface = False
filter_z_std = 2
filter_precision = 2
analyze_percent_measure_by_precision = False
export_precision_sweep_results = False
save_precision_sweep_figs = False
save_precision_figs = False
show_precision_figs = False
export_precision_results = False
analyze_combined_tests = True
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- -
# SETUP PROCESS MODIFIERS
# experimental
mag_eff = 20
microns_per_pixel = 0.8
meas_depth = 100
# fitting
kx = 1
ky = 2
# origin centering
z0 = 38 # ~z @ zero deflection (microns)
z0c = 35
y0 = 22.5 # offset from wall boundary (pixels)
# filtering
filter_z = 41
cm_min = 0.5
z_corr_min = 0
min_num_bind = 15
min_num_frames = 5
# --- define beam equations
E = 6e6 # elastic modulus of SILPURAN (Pa)
t = 20e-6 # thickness of SILPURAN sheet (m)
L = 2.5e-3 # width of channel (m)
instRectPlate = functions.fRectangularUniformLoad(plate_width=L, youngs_modulus=E, plate_thickness=t)
f_ssRectPlate = instRectPlate.rectangular_uniformly_loaded_simply_supported_plate
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- -
# ANALYZE IN-FOCUS CALIBRATION COORDINATES FROM SPCT
# read calibration file
    dfc = pd.read_excel(fpcal)  # api: pandas.read_excel