prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
import requests
import os
import pandas as pd
from datetime import datetime
import csv
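# norm() converts a raw token amount (an integer string in the token's smallest
# unit) into a human-readable decimal string, e.g. norm("1500000", "USDC") -> "1.5";
# USDC/USDT use 6 decimal places, every other asset is assumed to use 18.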
def norm(qty, asset):
dps = 6 if (asset == "USDC" or asset == "USDT") else 18
tmp = qty.rjust(dps + 1, "0")
return (tmp[0:-dps] + "." + tmp[-dps:]).rstrip("0").rstrip(".")
def main(in_class="mined", out_class="remove funds"):
wallet = os.environ["ETH_WALLET"]
csv_path = "transactions.csv"
inc = 100
n = 0
new_c = 0
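# Page through the account history `inc` transactions at a time; `n` is the
# paging offset passed to the API and `new_c` counts transactions added this run.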
while True:
# Try to read existing csv if available
try:
df = pd.read_csv(csv_path)
except (FileNotFoundError, pd.errors.EmptyDataError, csv.Error):
df = None
# get the next batch of transactions
try:
resp = requests.get(
f"https://api.zksync.io/api/v0.1/account/{wallet}/history/{n}/{inc}"
)
resp.raise_for_status()
except requests.exceptions.HTTPError as err:
raise SystemExit(err)
# get json response
j = resp.json()
# Repeat until response empty
if not j:
break
all_trx = []
# For every transaction in the batch
for t in j:
# clean transaction string
tx_hash = t["hash"].replace("sync-tx:", "")
# skip existing transactions to avoid duplicates
try:
if df.isin([tx_hash]).any().any():
# just for debugging
# print(f"Found duplicate: {tx_hash}, skipping!")
continue
except AttributeError:
pass
# transaction detail shortcut
td = t["tx"]
# init structure for csv
my_trx = {
"Type": None,
"Buy Quantity": None,
"Buy Asset": None,
"Buy Value": None,
"Sell Quantity": None,
"Sell Asset": None,
"Sell Value": None,
"Fee Quantity": None,
"Fee Asset": None,
"Fee Value": None,
"Wallet": "ZKSync",
"Timestamp": None,
}
my_date = t["created_at"]
# convert to datetime object and then to format needed
dto = datetime.strptime(my_date, "%Y-%m-%dT%H:%M:%S.%f%z")
my_trx["Timestamp"] = dto.strftime("%d/%m/%Y %H:%M:%S")
# trx coming in
try:
# Withdrawal to mainnet
if (td["type"] == "Withdraw"):
my_trx["Type"] = "Withdrawal"
my_trx["Sell Quantity"] = norm(td["amount"], td["token"])
my_trx["Sell Asset"] = td["token"]
my_trx["Fee Quantity"] = norm(td["fee"], td["token"])
my_trx["Fee Asset"] = td["token"]
elif (td["type"] == "Transfer"):
if td["to"].lower() == wallet.lower():
# Some fee payments look like zero value transfers to oneself with fees
if (td["from"].lower() == wallet.lower() and td["amount"] == "0" and td["fee"] != "0"):
my_trx["Type"] = "Spend"
my_trx["Sell Quantity"] = 0
my_trx["Sell Asset"] = td["token"]
my_trx["Fee Quantity"] = norm(td["fee"], td["token"])
my_trx["Fee Asset"] = td["token"]
# Incoming tx
else:
my_trx["Type"] = "Income"
my_trx["Buy Quantity"] = norm(td["amount"], td["token"])
my_trx["Buy Asset"] = td["token"]
# Outgoing tx
else:
my_trx["Type"] = "Spend"
my_trx["Sell Quantity"] = norm(td["amount"], td["token"])
my_trx["Sell Asset"] = td["token"]
my_trx["Fee Quantity"] = norm(td["fee"], td["token"])
my_trx["Fee Asset"] = td["token"]
# Swaps are too hard to populate: need to do it manually (e.g. token types are indexed)
elif (td["type"] == "Swap"):
my_trx["Type"] = "Trade"
# Ignore others like ChangePubKey
# Also, I don't know how deposits from L1 are categorised - I've never done one.
else:
print(f"Warning: ignoring transaction type {td['type']}")
continue
except KeyError:
continue
my_trx["operationId"] = tx_hash
all_trx.append(my_trx)
# count new transactions
new_c += 1
# Append to existing df or create new
if df is None or df.empty:
df = | pd.DataFrame(all_trx) | pandas.DataFrame |
from tqdm import tqdm
from functools import partial
import multiprocessing
import pandas as pd
import numpy as np
import argparse
import os
def step0_extract(filename):
df = pd.read_csv(filename, low_memory=False)
print('Total size:\t\t\t\t\t', len(df))
df = df[df['TransactionType'] == 'FI-InvoicedDocument']
print('Total size (transaction type filter):\t\t', len(df))
df = df[['DocumentKey', 'CustomerKey', 'DocumentDate', 'DueDate', 'ClearingDate', 'InvoicedAmount']].dropna()
print('Total size (dropna filter):\t\t\t', len(df))
for col in ['DocumentKey']:
df[col] = df[col].apply(lambda x: x.split('|')[2])
for col in ['DocumentDate', 'DueDate', 'ClearingDate']:
df[col] = pd.to_datetime(df[col])
for col in ['DocumentKey', 'CustomerKey', 'InvoicedAmount']:
df[col] = pd.to_numeric(df[col])
df = df[(df['DocumentDate'] < df['DueDate']) & (df['DocumentDate'] < df['ClearingDate'])]
df = df[df['InvoicedAmount'] >= 1000]
print('Total size (Date and Invoice Amount filters):\t', len(df), '\n')
return df
def step1_organize(filename):
def group(x): return x.groupby(['CustomerKey', 'DueDate'])
def order(x): return x.sort_values(by=['DueDate'], ascending=True, ignore_index=True)
def merge(x1, x2): return pd.merge(order(x1), order(x2), how='left', on=['CustomerKey', 'DueDate'])
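# Helper lambdas: `group` buckets invoices by (CustomerKey, DueDate), `order`
# sorts by DueDate, and `merge` left-joins two such frames on those keys.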
df = pd.read_csv(filename, parse_dates=['DocumentDate', 'DueDate', 'ClearingDate'], low_memory=False)
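# 'OS' (outstanding) is 1 when the invoice was cleared after the Monday of its
# due-date week, 0 otherwise.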
df['OS'] = ((df['ClearingDate'] - (df['DueDate'] - pd.to_timedelta(arg=df['DueDate'].dt.weekday, unit='D')))
.astype('timedelta64[D]') > 0).astype(int)
df_doc = order(group(df)['DocumentDate'].min().reset_index())
df_cle = order(group(df)['ClearingDate'].max().reset_index())
df_gp = pd.DataFrame({
'CustomerKey': df_doc['CustomerKey'].values,
'DocumentDate': df_doc['DocumentDate'].values,
'DueDate': df_doc['DueDate'].values,
'ClearingDate': df_cle['ClearingDate'].values,
})
df_gp = merge(df_gp, group(df).size().reset_index(name='InvoiceCount'))
df_gp = merge(df_gp, group(df[df['OS'] == 1]).size().reset_index(name='OSInvoiceCount'))
df_gp['OSInvoiceCount'] = df_gp['OSInvoiceCount'].fillna(0).astype(int)
df_gp['R_OSInvoiceCount'] = df_gp['OSInvoiceCount'] / df_gp['InvoiceCount']
df_gp['R_OSInvoiceCount'] = df_gp['R_OSInvoiceCount'].fillna(0)
df_gp = merge(df_gp, group(df)['InvoicedAmount'].sum().reset_index(name='InvoiceAmount'))
df_gp = merge(df_gp, group(df[df['OS'] == 1])['InvoicedAmount'].sum().reset_index(name='OSInvoiceAmount'))
df_gp['OSInvoiceAmount'] = df_gp['OSInvoiceAmount'].fillna(0)
df_gp['R_OSInvoiceAmount'] = df_gp['OSInvoiceAmount'] / df_gp['InvoiceAmount']
df_gp['R_OSInvoiceAmount'] = df_gp['R_OSInvoiceAmount'].fillna(0)
return df_gp
def step2_generate(filename):
df = pd.read_csv(filename, parse_dates=['DocumentDate', 'DueDate', 'ClearingDate'], low_memory=False)
df['DaysToEndMonth'] = ((df['DueDate'] + pd.offsets.MonthEnd(0)) - df['DueDate']).dt.days
df['DaysLate'] = ((df['ClearingDate'] - df['DueDate']).dt.days).clip(lower=0)
df['DaysLateAM'] = (df['DaysLate'] - df['DaysToEndMonth']).clip(lower=0)
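# PaymentCategory: 0 = paid on time, 1 = paid late but before the end of the
# due month, 2 = paid late and after the end of the due month.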
df['PaymentCategory'] = 0
df.loc[(df['DaysLate'] > 0) & (df['DaysLateAM'] <= 0), 'PaymentCategory'] = 1
df.loc[(df['DaysLate'] > 0) & (df['DaysLateAM'] > 0), 'PaymentCategory'] = 2
features = []
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
for x in tqdm(pool.imap(partial(_historic), df[['CustomerKey', 'DueDate']].values), total=len(df)):
features.append(x)
pool.close()
pool.join()
df = pd.merge(df, pd.DataFrame(features), how='left', on=['CustomerKey', 'DueDate'])
df.drop(['DaysToEndMonth', 'DaysLate', 'DaysLateAM', 'PaymentCategory'], axis=1, inplace=True)
return df
def step3_prepare(filename):
df = pd.read_csv(filename, parse_dates=['DocumentDate', 'DueDate', 'ClearingDate'], low_memory=False)
df['DaysToDueDate'] = (df['DueDate'] - df['DocumentDate']).dt.days
df['DaysToEndMonth'] = ((df['DueDate'] + | pd.offsets.MonthEnd(0) | pandas.offsets.MonthEnd |
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
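# Reading everything back with dtype=str/object should round-trip either to the
# original floats (after astype(float)) or to their string representation.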
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
{"a": "category", "b": "category", "c": CategoricalDtype()},
],
)
def test_categorical_dtype(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["a", "a", "b"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_unsorted(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", "b", "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_missing(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", np.nan, "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers):
# see gh-18186
parser = all_parsers
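# 524289 == 2**19 + 1 distinct string values, i.e. a high-cardinality input for
# the category dtype (see gh-18186).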
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({"a": Categorical(data, ordered=True)})
actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True
)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
encoding = "latin-1"
expected = parser.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
encoding = "utf-16"
sep = "\t"
expected = parser.read_csv(pth, sep=sep, encoding=encoding)
expected = expected.apply(Categorical)
actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_infer_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
]
actuals = parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ["a", "b", "c"]
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
DataFrame(
{"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, index=[2, 3]
),
]
dtype = CategoricalDtype(cats)
actuals = parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"categories",
[["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(
["a", "b", "b", "c"], categories=categories, ordered=ordered
),
}
)
dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_category_dtype_unsorted(all_parsers):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(["c", "b", "a"])
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
}
)
result = parser.read_csv(StringIO(data), dtype={"b": dtype})
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_numeric(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_datetime(all_parsers):
parser = all_parsers
dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
dtype = {"b": CategoricalDtype(dti)}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timestamp(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([Timestamp("2014")])}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timedelta(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
data = "b\n1H\n2H\n3H"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
"b\nTrue\nFalse\nNA\nFalse",
"b\ntrue\nfalse\nNA\nfalse",
"b\nTRUE\nFALSE\nNA\nFALSE",
"b\nTrue\nFalse\nNA\nFALSE",
],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
# see gh-20498
parser = all_parsers
dtype = {"b": CategoricalDtype([False, True])}
expected = DataFrame({"b": Categorical([True, False, None, False])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)], names=["one", "two"]
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
data = ""
parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
# see gh-2631
parser = all_parsers
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
msg = (
"Integer column has NA values"
if parser.engine == "c"
else "Unable to convert column DOY"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
def test_dtype_with_converters(all_parsers):
parser = all_parsers
data = """a,b
1.1,2.2
1.2,2.3"""
# Dtype spec is ignored if a converter is specified.
with tm.assert_produces_warning(ParserWarning):
result = parser.read_csv(
StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
)
expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
("category", DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[])),
(
dict(a="category", b="category"),
DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
),
("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
(
"timedelta64[ns]",
DataFrame(
{
"a": Series([], dtype="timedelta64[ns]"),
"b": | Series([], dtype="timedelta64[ns]") | pandas.Series |
import matplotlib.pyplot as plt
#import numpy
import pandas as pd
#These are the evaluation methods used for extracting the data for the test cases.
#-Hopefully the names of the functions are self-explanatory
all_cost = []
last_costs = []
acceptance = []
temp = []
seconds = []
usages = []
requirements = []
RB_usages = []
no_of_groups = []
data_node = []
parity_node = []
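# Module-level lists that accumulate metrics (costs, temperature schedule,
# runtimes, resource usage, node counts) across runs; the csv* helpers below
# dump selected lists to disk for later analysis.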
def eval_topology_RB(cost):
list.append(all_cost, cost)
#print(all_cost)
plt.plot(all_cost)
plt.ylabel('Overall latency')
plt.xlabel('Iterations')
return all_cost
def eval_topology_DS(cost):
#print('hello from eval')
list.append(all_cost, cost)
#print(all_cost)
plt.plot(all_cost)
plt.ylabel('Overall - Latency')
plt.xlabel('Iterations')
return all_cost
def eval_annealing_DS(T):
#print('hello from eval')
list.append(temp, T)
print('temp', temp)
return
def csv1():
df = pd.DataFrame(temp)
df.to_csv("/home/christopher/IdeaProjects//evaluation/anneal_DS/Exponential/schedule.csv")
return
def eval_seonds_DS(sec):
#print('hello from eval')
list.append(seconds, sec)
print('sec', sec)
return
def csv_sec():
df = pd.DataFrame(seconds)
df.to_csv("/home/christopher/IdeaProjects/evaluation/scalability-parity/seconds-GNP-200.csv")
return
def eval_usage_DS(usage, req):
#print('hej fra eval')
list.append(usages, usage)
list.append(requirements, req)
print('usage', usage)
print('requirement', req)
return
def csv_usage():
df = | pd.DataFrame(usages) | pandas.DataFrame |
"""
Created on Mon Feb 22 15:52:51 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
import os
import pickle
import calendar
import time
import warnings
from pyproj import Transformer
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from requests import get
import dataframe_key
def compile_chicago_stations():
"""
Reads data files containing information about docking stations in Chicago
and compiles the data into a dataframe. The dataframe is then saved as a
pickle for further use.
The relevant files can be found at:
https://divvy-tripdata.s3.amazonaws.com/index.html
https://data.cityofchicago.org/Transportation/Divvy-Bicycle-Stations-All-Map/bk89-9dk7
Raises
------
FileNotFoundError
Raised if no data files containing station data are found.
Returns
-------
stat_df : pandas DataFrame
Dataframe of all docking station information.
"""
try:
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
except FileNotFoundError as exc:
print('No pickle found. Creating pickle...')
stat_files = [file for file in os.listdir('data') if 'Divvy_Stations' in file]
col_list = ['id', 'name', 'latitude', 'longitude']
key = {'ID':'id', 'Station Name':'name', 'Latitude':'latitude','Longitude':'longitude'}
try:
stat_df = pd.read_csv(
'data/Divvy_Bicycle_Stations_-_All_-_Map.csv').rename(columns = key)
stat_df = stat_df[col_list]
except FileNotFoundError:
stat_df = pd.DataFrame(columns = col_list)
for file in stat_files:
df = pd.read_csv(f'./data/{file}')[col_list]
stat_df = pd.concat([stat_df, df], sort = False)
if stat_df.size == 0:
raise FileNotFoundError(
'No data files containing station data found. Please read the docstring for more information.') from exc
stat_df.drop_duplicates(subset = 'name', inplace = True)
with open('./python_variables/Chicago_stations.pickle', 'wb') as file:
pickle.dump(stat_df, file)
print('Pickle loaded')
return stat_df
def get_JC_blacklist():
"""
Constructs/updates a blacklist of stations in Jersey City area. The
blacklist is created using historical biketrip datasets for the area.
Use only if you know what you are doing.
The relevant files can be found at:
https://www.citibikenyc.com/system-data
Raises
------
FileNotFoundError
Raised if no Jersey City dataset is found.
Returns
-------
blacklist : list
List of IDs of the Jersey City docking stations.
"""
try:
with open('./python_variables/JC_blacklist', 'rb') as file:
blacklist = pickle.load(file)
except FileNotFoundError:
print('No previous blacklist found. Creating blacklist...')
blacklist = set()
JC_files = [file for file in os.listdir('data') if 'JC' in file]
if len(JC_files) == 0:
raise FileNotFoundError(
'No JC files found. Please have a JC file in the data directory to create/update blacklist.')
for file in JC_files:
df = pd.read_csv('data/' + file)
df = df.rename(columns = dataframe_key.get_key('nyc'))
JC_start_stat_indices = df.loc[df['start_stat_long'] < 74.02]
JC_end_stat_indices = df.loc[df['end_stat_long'] < 74.02]
stat_IDs = set(
df['start_stat_id'][JC_start_stat_indices]) | set(df['end_stat_id'][JC_end_stat_indices])
blacklist = blacklist | stat_IDs
with open('./python_variables/JC_blacklist', 'wb') as file:
pickle.dump(blacklist, file)
print('Blacklist updated')
return blacklist
def days_index(df):
"""
Find indices of daily trips.
Parameters
----------
df : pandas DataFrame
Dataframe containing bikeshare trip data with columns that have been
renamed to the common key.
Returns
-------
d_i : dict
Contains the indices of the first trip per day.
"""
days = df['start_dt'].dt.day
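# (days == i).idxmax() returns the index of the first True value, i.e. the first
# trip that starts on day i.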
d_i = [(days == i).idxmax() for i in range(1, max(days)+1)]
return dict(zip(range(1, max(days)+1), d_i))
def pickle_data(df, city, year, month):
"""
Generate pickle of days' starting indices.
Parameters
----------
df : pandas DataFrame
bikeshare trip data with columns that have been renamed to the common
key.
city : str
The identification of the city. For a list of supported cities, see
the documentation for the Data class.
year : int
The year of interest in YYYY format.
month : int
The month of interest in MM format.
Returns
-------
d : dict
Contains the indices of the first trip per day.
"""
d = days_index(df)
with open(f'./python_variables/day_index_{city}{year:d}{month:02d}.pickle', 'wb') as file:
pickle.dump(d, file)
return d
def get_data(city, year, month, blacklist=None):
"""
Read data from csv files.
Parameters
----------
city : str
The identification of the city. For a list of supported cities, see
the documentation for the Data class.
year : int
The year of interest in YYYY format.
month : int
The month of interest in MM format.
blacklist : list, optional
List of IDs of stations to remove. Default is None.
Returns
-------
df : pandas DataFrame
Dataframe containing bikeshare trip data.
days : dict
Contains the indices of the first trip per day.
"""
supported_cities = ['nyc', 'sfran', 'sjose',
'washDC', 'chic', 'london',
'oslo', 'edinburgh', 'bergen',
'buenos_aires', 'madrid',
'mexico', 'taipei'] # Remember to update this list
if city not in supported_cities:
raise ValueError("This city is not currently supported. Supported cities are {}".format(supported_cities))
# Make folder for dataframes if not found
if not os.path.exists('python_variables/big_data'):
os.makedirs('python_variables/big_data')
try:
with open(f'./python_variables/big_data/{city}{year:d}{month:02d}_dataframe_blcklst={blacklist}.pickle', 'rb') as file:
df = pickle.load(file)
print('Pickle loaded')
except FileNotFoundError:
print('No dataframe pickle found. Pickling dataframe...')
if city == "nyc":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-citibike-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.citibikenyc.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
try:
with open('./python_variables/JC_blacklist', 'rb') as file:
JC_blacklist = pickle.load(file)
df = df[~df['start_stat_id'].isin(JC_blacklist)]
df = df[~df['end_stat_id'].isin(JC_blacklist)]
except FileNotFoundError:
print('No JC blacklist found. Continuing...')
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "washDC":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-capitalbikeshare-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.capitalbikeshare.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
stat_df = pd.read_csv('data/Capital_Bike_Share_Locations.csv')
for _ , stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_id'] == stat['TERMINAL_NUMBER'])
end_matches = np.where(df['end_stat_id'] == stat['TERMINAL_NUMBER'])
df.at[start_matches[0], 'start_stat_lat'] = stat['LATITUDE']
df.at[start_matches[0], 'start_stat_long'] = stat['LONGITUDE']
df.at[end_matches[0], 'end_stat_lat'] = stat['LATITUDE']
df.at[end_matches[0], 'end_stat_long'] = stat['LONGITUDE']
df.replace('', np.nan, inplace = True)
df.dropna(inplace=True)
max_lat = 38.961029
min_lat = 38.792686
max_long= -76.909415
min_long= -77.139396
df = df.iloc[np.where(
(df['start_stat_lat'] < max_lat) &
(df['start_stat_lat'] > min_lat) &
(df['start_stat_long'] < max_long) &
(df['start_stat_long'] > min_long))]
df = df.iloc[np.where(
(df['end_stat_lat'] < max_lat) &
(df['end_stat_lat'] > min_lat) &
(df['end_stat_long'] < max_long) &
(df['end_stat_long'] > min_long))]
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "chic":
q = int(np.ceil(month/3))
try:
df = pd.read_csv(f'./data/Divvy_Trips_{year:d}_Q{q}.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.divvybikes.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
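# Divvy publishes quarterly files, so keep only trips that fall inside the
# requested month.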
n_days = calendar.monthrange(year, month)[1]
df = df.iloc[np.where(df['start_t'] > f'{year:d}-{month:02d}-01 00:00:00')]
df = df.iloc[np.where(df['start_t'] < f'{year:d}-{month:02d}-{n_days} 23:59:59')]
df.reset_index(inplace = True, drop = True)
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
try:
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
except FileNotFoundError as exc:
compile_chicago_stations()
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
for _, stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_name'] == stat['name'])
end_matches = np.where(df['end_stat_name'] == stat['name'])
df.at[start_matches[0], 'start_stat_lat'] = stat['latitude']
df.at[start_matches[0], 'start_stat_long'] = stat['longitude']
df.at[end_matches[0], 'end_stat_lat'] = stat['latitude']
df.at[end_matches[0], 'end_stat_long'] = stat['longitude']
df.replace('', np.nan, inplace = True)
df.dropna(subset = ['start_stat_lat',
'start_stat_long',
'end_stat_lat',
'end_stat_long'], inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
df['duration'] = df['duration'].str.replace(',', '').astype(float)
elif city == "sfran":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-baywheels-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.lyft.com/bikes/bay-wheels/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df = df.iloc[np.where(df['start_stat_lat'] > 37.593220)]
df = df.iloc[np.where(df['end_stat_lat'] > 37.593220)]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "sjose":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-baywheels-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.lyft.com/bikes/bay-wheels/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df = df.iloc[np.where(df['start_stat_lat'] < 37.593220)]
df = df.iloc[np.where(df['end_stat_lat'] < 37.593220)]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "london":
month_dict = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May',
6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct',
11:'Nov', 12:'Dec'}
data_files = [file for file in os.listdir('data') if 'JourneyDataExtract' in file]
data_files = [file for file in data_files if '{}'.format(year) in file
and '{}'.format(month_dict[month]) in file]
if len(data_files) == 0:
raise FileNotFoundError('No London data for {}. {} found. All relevant files can be found at https://cycling.data.tfl.gov.uk/.'.format(month_dict[month], year))
if isinstance(data_files, str):
warnings.warn('Only one data file found. Please check that you have all available data.')
df = pd.read_csv('./data/' + data_files[0])
for file in data_files[1:]:
df_temp = pd.read_csv('./data/' + file)
df = pd.concat([df, df_temp], sort = False)
df.rename(columns = dataframe_key.get_key(city), inplace = True)
n_days = calendar.monthrange(year, month)[1]
df = df.iloc[np.where(df['start_t'] >= f'01/{month:02d}/{year} 00:00')]
df = df.iloc[np.where(df['start_t'] <= f'{n_days}/{month:02d}/{year} 23:59')]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True)
df['start_t'] = pd.to_datetime(df['start_t'], format = '%d/%m/%Y %H:%M').astype(str)
df['end_t'] = pd.to_datetime(df['end_t'], format = '%d/%m/%Y %H:%M').astype(str)
stat_df = pd.read_csv('./data/london_stations.csv')
stat_df.at[np.where(stat_df['station_id'] == 502)[0][0], 'latitude'] = 51.53341
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
for _ , stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_name'] == stat['station_name'])
end_matches = np.where(df['end_stat_name'] == stat['station_name'])
df.at[start_matches[0], 'start_stat_lat'] = stat['latitude']
df.at[start_matches[0], 'start_stat_long'] = stat['longitude']
df.at[end_matches[0], 'end_stat_lat'] = stat['latitude']
df.at[end_matches[0], 'end_stat_long'] = stat['longitude']
df.replace('', np.nan, inplace = True)
df.dropna(inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
df = df[df.start_dt.dt.month == month]
df.reset_index(inplace = True, drop = True)
elif city == "oslo":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-oslo.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://oslobysykkel.no/en/open-data/historical') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "edinburgh":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-edinburgh.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://edinburghcyclehire.com/open-data/historical') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "bergen":
try:
df = | pd.read_csv(f'./data/{year:d}{month:02d}-bergen.csv') | pandas.read_csv |
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["a", "b", "c"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = | date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx") | pandas.date_range |
## python 101917081.py "C:\Users\hp\Desktop\Assignment-4\Input files for Assignment04\data.csv" "1,1,1,1,1" "+,-,+,-,+" "abcd-result.csv"
from argparse import ArgumentParser
from pathlib import Path
import pandas as pd
import sys
def main():
# total arguments
n = len(sys.argv)
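# The script expects exactly four arguments after the script name: input csv,
# weights, impacts, output csv (see the usage example at the top), so
# len(sys.argv) must be 5.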
if n<5 :
exit('Enter more arguments')
if n>5 :
exit('Enter fewer arguments')
input_file = sys.argv[1]
path = Path(input_file)
if not path.is_file():
print('File not found!!')
exit()
df=pd.read_csv(input_file)
if df.shape[1]<3 :
print('Input file contains fewer than 3 columns')
exit()
# Checking whether dataframe contains only numeric values or not
check_numeric= df.apply(lambda s: | pd.to_numeric(s, errors='coerce') | pandas.to_numeric |
# Copyright WillianFuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module plot.py. Module matplotlib is not required as it's mocked accordingly.
"""
import mock
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
import causalimpact.plot as plotter
@pytest.fixture
def inferences(rand_data):
df = pd.DataFrame(np.random.rand(len(rand_data), 9))
df.columns = [
'complete_preds_means',
'complete_preds_lower',
'complete_preds_upper',
'point_effects_means',
'point_effects_lower',
'point_effects_upper',
'post_cum_effects_means',
'post_cum_effects_lower',
'post_cum_effects_upper'
]
return df
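# build_data is expected to drop the NaN rows from the pre/post data and keep
# only the inference rows whose index remains in either period.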
def test_build_data():
pre_data = pd.DataFrame([0, 1, np.nan])
post_data = pd.DataFrame([3, 4, np.nan], index=[3, 4, 5])
inferences = pd.DataFrame([0, 1, 2, 3, 4, 5])
pre_data, post_data, inferences = plotter.build_data(pre_data, post_data, inferences)
expected_pre_data = pd.DataFrame([0, 1]).astype(np.float64)
pd.testing.assert_frame_equal(pre_data, expected_pre_data)
expected_post_data = pd.DataFrame([3, 4], index=[3, 4]).astype(np.float64)
pd.testing.assert_frame_equal(post_data, expected_post_data)
expected_inferences = pd.DataFrame([0, 1, 3, 4],
index=[0, 1, 3, 4]).astype(np.float64)
pd.testing.assert_frame_equal(inferences, expected_inferences)
def test_plot_original_panel(rand_data, pre_int_period, post_int_period, inferences,
monkeypatch):
plot_mock = mock.Mock()
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1]]
post_data = rand_data.loc[post_int_period[0]: post_int_period[1]]
pre_post_index = pre_data.index.union(post_data.index)
monkeypatch.setattr('causalimpact.plot.get_plotter', plot_mock)
plotter.plot(inferences, pre_data, post_data, panels=['original'])
plot_mock.assert_called_once()
plot_mock.return_value.figure.assert_called_with(figsize=(10, 7))
plot_mock.return_value.subplot.assert_any_call(1, 1, 1)
ax_mock = plot_mock.return_value.subplot.return_value
ax_args = ax_mock.plot.call_args_list
assert_array_equal(pre_post_index, ax_args[0][0][0])
assert_array_equal(
pd.concat([pre_data.iloc[:, 0], post_data.iloc[:, 0]]),
ax_args[0][0][1]
)
assert ax_args[0][0][2] == 'k'
assert ax_args[0][1] == {'label': 'y'}
assert_array_equal(pre_post_index[1:], ax_args[1][0][0])
assert_array_equal(inferences['complete_preds_means'].iloc[1:], ax_args[1][0][1])
assert ax_args[1][1] == {'color': 'orangered', 'ls': 'dashed', 'label': 'Predicted'}
ax_mock.axvline.assert_called_with(pre_int_period[1], c='gray', linestyle='--')
ax_args = ax_mock.fill_between.call_args_list[0]
assert_array_equal(ax_args[0][0], pre_post_index[1:])
assert_array_equal(ax_args[0][1], inferences['complete_preds_lower'].iloc[1:])
assert_array_equal(ax_args[0][2], inferences['complete_preds_upper'].iloc[1:])
assert ax_args[1] == {'color': (1.0, 0.4981, 0.0549), 'alpha': 0.4}
ax_mock.grid.assert_called_with(True, color='gainsboro')
ax_mock.legend.assert_called()
plot_mock.return_value.show.assert_called_once()
def test_plot_original_panel_gap_data(rand_data, pre_int_gap_period, post_int_gap_period,
inferences, monkeypatch):
plot_mock = mock.Mock()
pre_data = rand_data.loc[pre_int_gap_period[0]: pre_int_gap_period[1]]
post_data = rand_data.loc[post_int_gap_period[0]: post_int_gap_period[1]]
pre_data = pre_data.set_index(pd.RangeIndex(start=0, stop=len(pre_data)))
post_data = post_data.set_index(pd.RangeIndex(start=len(pre_data),
stop=len(pre_data) + len(post_data)))
pre_post_index = pre_data.index.union(post_data.index)
monkeypatch.setattr('causalimpact.plot.get_plotter', plot_mock)
plotter.plot(inferences, pre_data, post_data, panels=['original'])
plot_mock.assert_called_once()
plot_mock.return_value.figure.assert_called_with(figsize=(10, 7))
plot_mock.return_value.subplot.assert_any_call(1, 1, 1)
ax_mock = plot_mock.return_value.subplot.return_value
ax_args = ax_mock.plot.call_args_list
assert_array_equal(pre_post_index, ax_args[0][0][0])
assert_array_equal(
| pd.concat([pre_data.iloc[:, 0], post_data.iloc[:, 0]]) | pandas.concat |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with | tm.assert_raises_regex(TypeError, msg) | pandas.util.testing.assert_raises_regex |
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import pandas as pd
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import pipeline
from activitysim.core import simulate
from activitysim.core import tracing
from activitysim.core import logit
from activitysim.core import expressions
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core.util import assign_in_place
from activitysim.core.tracing import print_elapsed_time
from .util import estimation
logger = logging.getLogger(__name__)
NO_DESTINATION = -1
def wrap_skims(model_settings):
"""
wrap skims of trip destination using origin, dest column names from model settings.
    Several of these are used by destination_sample, compute_logsums, and destination_simulate
so we create them all here with canonical names.
Note that compute_logsums aliases their names so it can use the same equations to compute
    logsums from origin to alt_dest, and from alt_dest to primary destination
odt_skims - SkimStackWrapper: trip origin, trip alt_dest, time_of_day
dot_skims - SkimStackWrapper: trip alt_dest, trip origin, time_of_day
dpt_skims - SkimStackWrapper: trip alt_dest, trip primary_dest, time_of_day
    pdt_skims - SkimStackWrapper: trip primary_dest, trip alt_dest, time_of_day
od_skims - SkimDictWrapper: trip origin, trip alt_dest
dp_skims - SkimDictWrapper: trip alt_dest, trip primary_dest
Parameters
----------
model_settings
Returns
-------
dict containing skims, keyed by canonical names relative to tour orientation
"""
network_los = inject.get_injectable('network_los')
skim_dict = network_los.get_default_skim_dict()
origin = model_settings['TRIP_ORIGIN']
park_zone = model_settings['ALT_DEST_COL_NAME']
destination = model_settings['TRIP_DESTINATION']
time_period = model_settings['TRIP_DEPARTURE_PERIOD']
skims = {
"odt_skims": skim_dict.wrap_3d(orig_key=origin, dest_key=destination, dim3_key=time_period),
"dot_skims": skim_dict.wrap_3d(orig_key=destination, dest_key=origin, dim3_key=time_period),
"opt_skims": skim_dict.wrap_3d(orig_key=origin, dest_key=park_zone, dim3_key=time_period),
"pdt_skims": skim_dict.wrap_3d(orig_key=park_zone, dest_key=destination, dim3_key=time_period),
"od_skims": skim_dict.wrap(origin, destination),
"do_skims": skim_dict.wrap(destination, origin),
"op_skims": skim_dict.wrap(origin, park_zone),
"pd_skims": skim_dict.wrap(park_zone, destination),
}
return skims
def get_spec_for_segment(model_settings, spec_name, segment):
omnibus_spec = simulate.read_model_spec(file_name=model_settings[spec_name])
spec = omnibus_spec[[segment]]
# might as well ignore any spec rows with 0 utility
spec = spec[spec.iloc[:, 0] != 0]
assert spec.shape[0] > 0
return spec
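# Illustrative sketch only (toy data, not ActivitySim output): how the zero-utility
# filtering in get_spec_for_segment behaves on a hypothetical two-segment spec.
#
#   toy_spec = pd.DataFrame({'work': [1.5, 0.0], 'univ': [0.0, 2.0]},
#                           index=['coef_time', 'coef_cost'])
#   work_spec = toy_spec[['work']]
#   work_spec = work_spec[work_spec.iloc[:, 0] != 0]   # only 'coef_time' remains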
def parking_destination_simulate(
segment_name,
trips,
destination_sample,
model_settings,
skims,
chunk_size, trace_hh_id,
trace_label):
"""
Chose destination from destination_sample (with od_logsum and dp_logsum columns added)
Returns
-------
choices - pandas.Series
destination alt chosen
"""
trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_simulate')
spec = get_spec_for_segment(model_settings, 'SPECIFICATION', segment_name)
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
logger.info("Running trip_destination_simulate with %d trips", len(trips))
locals_dict = config.get_model_constants(model_settings).copy()
locals_dict.update(skims)
parking_locations = interaction_sample_simulate(
choosers=trips,
alternatives=destination_sample,
spec=spec,
choice_column=alt_dest_col_name,
want_logsums=False,
allow_zero_probs=True, zero_prob_choice_val=NO_DESTINATION,
skims=skims,
locals_d=locals_dict,
chunk_size=chunk_size,
trace_label=trace_label,
trace_choice_name='parking_loc')
# drop any failed zero_prob destinations
if (parking_locations == NO_DESTINATION).any():
logger.debug("dropping %s failed parking locations", (parking_locations == NO_DESTINATION).sum())
parking_locations = parking_locations[parking_locations != NO_DESTINATION]
return parking_locations
def choose_parking_location(
segment_name,
trips,
alternatives,
model_settings,
want_sample_table,
skims,
chunk_size, trace_hh_id,
trace_label):
logger.info("choose_parking_location %s with %d trips", trace_label, trips.shape[0])
t0 = print_elapsed_time()
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
destination_sample = logit.interaction_dataset(trips, alternatives, alt_index_id=alt_dest_col_name)
destination_sample.index = np.repeat(trips.index.values, len(alternatives))
destination_sample.index.name = trips.index.name
destination_sample = destination_sample[[alt_dest_col_name]].copy()
    # - trip_destination_simulate
destinations = parking_destination_simulate(
segment_name=segment_name,
trips=trips,
destination_sample=destination_sample,
model_settings=model_settings,
skims=skims,
chunk_size=chunk_size, trace_hh_id=trace_hh_id,
trace_label=trace_label)
if want_sample_table:
# FIXME - sample_table
destination_sample.set_index(model_settings['ALT_DEST_COL_NAME'], append=True, inplace=True)
else:
destination_sample = None
t0 = print_elapsed_time("%s.parking_location_simulate" % trace_label, t0)
return destinations, destination_sample
def run_parking_destination(
model_settings,
trips, land_use,
chunk_size, trace_hh_id,
trace_label,
fail_some_trips_for_testing=False):
chooser_filter_column = model_settings.get('CHOOSER_FILTER_COLUMN_NAME')
chooser_segment_column = model_settings.get('CHOOSER_SEGMENT_COLUMN_NAME')
parking_location_column_name = model_settings['ALT_DEST_COL_NAME']
sample_table_name = model_settings.get('DEST_CHOICE_SAMPLE_TABLE_NAME')
want_sample_table = config.setting('want_dest_choice_sample_tables') and sample_table_name is not None
choosers = trips[trips[chooser_filter_column]]
choosers = choosers.sort_index()
# Placeholder for trips without a parking choice
trips[parking_location_column_name] = -1
skims = wrap_skims(model_settings)
alt_column_filter_name = model_settings.get('ALTERNATIVE_FILTER_COLUMN_NAME')
alternatives = land_use[land_use[alt_column_filter_name]]
# don't need size terms in alternatives, just TAZ index
alternatives = alternatives.drop(alternatives.columns, axis=1)
alternatives.index.name = parking_location_column_name
choices_list = []
sample_list = []
for segment_name, chooser_segment in choosers.groupby(chooser_segment_column):
if chooser_segment.shape[0] == 0:
logger.info("%s skipping segment %s: no choosers", trace_label, segment_name)
continue
choices, destination_sample = choose_parking_location(
segment_name,
chooser_segment,
alternatives,
model_settings,
want_sample_table,
skims,
chunk_size, trace_hh_id,
trace_label=tracing.extend_trace_label(trace_label, segment_name))
choices_list.append(choices)
if want_sample_table:
assert destination_sample is not None
sample_list.append(destination_sample)
if len(choices_list) > 0:
parking_df = pd.concat(choices_list)
if fail_some_trips_for_testing:
parking_df = parking_df.drop(parking_df.index[0])
assign_in_place(trips, parking_df.to_frame(parking_location_column_name))
trips[parking_location_column_name] = trips[parking_location_column_name].fillna(-1)
else:
trips[parking_location_column_name] = -1
save_sample_df = | pd.concat(sample_list) | pandas.concat |
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import rule
class TestRule(unittest.TestCase):
def test_empty(self):
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module main.py. Fixtures comes from file conftest.py located at the same dir
of this file.
"""
from __future__ import absolute_import, division, print_function
import os
import mock
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from pandas.core.indexes.range import RangeIndex
from pandas.util.testing import assert_frame_equal
from statsmodels.tsa.statespace.structural import (
UnobservedComponents, UnobservedComponentsResultsWrapper)
from causalimpact import CausalImpact
from causalimpact.misc import standardize
def test_default_causal_cto(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_date(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
assert_frame_equal(ci.data, date_rand_data)
assert ci.pre_period == pre_str_period
assert ci.post_period == post_str_period
pre_data = date_rand_data.loc[pre_str_period[0]: pre_str_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = date_rand_data.loc[post_str_period[0]: post_str_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
date_rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_no_exog(rand_data, pre_int_period, post_int_period):
rand_data = pd.DataFrame(rand_data.iloc[:, 0])
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert ci.model.exog is None
assert ci.model.endog_names == 'y'
assert ci.model.exog_names is None
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_np_array(rand_data, pre_int_period, post_int_period):
data = rand_data.values
ci = CausalImpact(data, pre_int_period, post_int_period)
assert_array_equal(ci.data, data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = pd.DataFrame(data[pre_int_period[0]: pre_int_period[1] + 1, :])
assert_frame_equal(ci.pre_data, pre_data)
post_data = pd.DataFrame(data[post_int_period[0]: post_int_period[1] + 1, :])
post_data.index = RangeIndex(start=len(pre_data), stop=len(rand_data))
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == [1, 2]
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_causal_cto_w_no_standardization(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period, standardize=False)
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert ci.normed_pre_data is None
assert ci.normed_post_data is None
assert ci.mu_sig is None
assert_array_equal(ci.model.endog, pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.p_value > 0 and ci.p_value < 1
def test_causal_cto_w_seasons(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
nseasons=[{'period': 4}, {'period': 3}])
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [2, 1]
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
nseasons=[{'period': 4, 'harmonics': 1},
{'period': 3, 'harmonis': 1}])
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [1, 1]
def test_causal_cto_w_custom_model_and_seasons(rand_data, pre_int_period,
post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
model = UnobservedComponents(endog=pre_data.iloc[:, 0], level='llevel',
exog=pre_data.iloc[:, 1:],
freq_seasonal=[{'period': 4}, {'period': 3}])
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model)
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [2, 1]
def test_causal_cto_w_custom_model(rand_data, pre_int_period, post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
model = UnobservedComponents(endog=pre_data.iloc[:, 0], level='llevel',
exog=pre_data.iloc[:, 1:])
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
def test_causal_cto_raises_on_None_input(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(None, pre_int_period, post_int_period)
assert str(excinfo.value) == 'data input cannot be empty'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, None, post_int_period)
assert str(excinfo.value) == 'pre_period input cannot be empty'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, None)
assert str(excinfo.value) == 'post_period input cannot be empty'
def test_invalid_data_input_raises():
with pytest.raises(ValueError) as excinfo:
CausalImpact('test', [0, 5], [5, 10])
assert str(excinfo.value) == 'Could not transform input data to pandas DataFrame.'
data = [1, 2, 3, 4, 5, 6, 2 + 1j]
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 3], [3, 6])
assert str(excinfo.value) == 'Input data must contain only numeric values.'
data = np.random.randn(10, 2)
data[0, 1] = np.nan
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 3], [3, 6])
assert str(excinfo.value) == 'Input data cannot have NAN values.'
def test_invalid_response_raises():
data = np.random.rand(100, 2)
data[:, 0] = np.ones(len(data)) * np.nan
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == 'Input response cannot have just Null values.'
data[0:2, 0] = 1
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == ('Input response must have more than 3 non-null points '
'at least.')
data[0:3, 0] = 1
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == 'Input response cannot be constant.'
def test_invalid_alpha_raises(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, alpha=1)
assert str(excinfo.value) == 'alpha must be of type float.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, alpha=2.)
assert str(excinfo.value) == (
'alpha must range between 0 (zero) and 1 (one) inclusive.'
)
def test_custom_model_input_validation(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model='test')
assert str(excinfo.value) == 'Input model must be of type UnobservedComponents.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.level = False
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have level attribute set.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.exog = None
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have exog attribute set.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.data = None
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have data attribute set.'
def test_kwargs_validation(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize='yes')
assert str(excinfo.value) == 'Standardize argument must be of type bool.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[7])
assert str(excinfo.value) == (
'nseasons must be a list of dicts with the required key "period" and the '
'optional key "harmonics".'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[{'test': 8}])
assert str(excinfo.value) == 'nseasons dicts must contain the key "period" defined.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[{'period': 4, 'harmonics': 3}])
assert str(excinfo.value) == (
'Total harmonics must be less or equal than periods divided by 2.')
def test_periods_validation(rand_data, date_rand_data):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 10], [4, 7])
assert str(excinfo.value) == (
'Values in training data cannot be present in the '
'post-intervention data. Please fix your pre_period value to cover at most one '
'point less from when the intervention happened.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180201'],
['20180110', '20180210'])
assert str(excinfo.value) == (
'Values in training data cannot be present in the '
'post-intervention data. Please fix your pre_period value to cover at most one '
'point less from when the intervention happened.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 10], [15, 11])
assert str(excinfo.value) == 'post_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180110'],
['20180115', '20180111'])
assert str(excinfo.value) == 'post_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 2], [15, 11])
assert str(excinfo.value) == 'pre_period must span at least 3 time points.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180102'],
['20180115', '20180111'])
assert str(excinfo.value) == 'pre_period must span at least 3 time points.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 0], [15, 11])
assert str(excinfo.value) == 'pre_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180105', '20180101'],
['20180115', '20180111'])
assert str(excinfo.value) == 'pre_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, 0, [15, 11])
assert str(excinfo.value) == 'Input period must be of type list.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, '20180101', ['20180115', '20180130'])
assert str(excinfo.value) == 'Input period must be of type list.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 10, 30], [15, 11])
assert str(excinfo.value) == (
'Period must have two values regarding the beginning '
'and end of the pre and post intervention data.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, None], [15, 11])
assert str(excinfo.value) == 'Input period cannot have `None` values.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 5.5], [15, 11])
assert str(excinfo.value) == 'Input must contain either int, str or pandas Timestamp'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [-2, 10], [11, 20])
assert str(excinfo.value) == (
'-2 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 10], [11, 2000])
assert str(excinfo.value) == (
'2000 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, ['20180101', '20180110'],
['20180111', '20180130'])
assert str(excinfo.value) == (
'20180101 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180110'],
['20180111', '20200130'])
assert str(excinfo.value) == ('20200130 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20170101', '20180110'],
['20180111', '20180120'])
assert str(excinfo.value) == ('20170101 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [pd.Timestamp('20180101'), | pd.Timestamp('20180110') | pandas.Timestamp |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: 金十数据中心-经济指标-美国
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - United States - pending home sales monthly rate (MoM)
def macro_usa_phs():
"""
    US pending home sales monthly rate (MoM)
    http://data.eastmoney.com/cjsj/foreign_0_5.html
    :return: US pending home sales monthly rate (MoM)
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
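# Example usage (sketch; requires live access to the eastmoney.com endpoint above):
#
#   if __name__ == "__main__":
#       macro_usa_phs_df = macro_usa_phs()
#       print(macro_usa_phs_df.head())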
# Jin10 Data Center - Economic Indicators - United States - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
"""
    US Gross Domestic Product (GDP) report, data range from 20080228 to the present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
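# Example usage (sketch; assumes live access to the jin10.com endpoints above; the
# other report functions in this module follow the same call pattern):
#
#   if __name__ == "__main__":
#       macro_usa_gdp_monthly_se = macro_usa_gdp_monthly()
#       print(macro_usa_gdp_monthly_se.tail())  # float Series indexed by release date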
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US CPI monthly rate report
def macro_usa_cpi_monthly():
"""
    US CPI monthly rate report, data range from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
    :return: US CPI monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US core CPI monthly rate report
def macro_usa_core_cpi_monthly():
"""
    US core CPI monthly rate report, data range from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
    :return: US core CPI monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US personal spending monthly rate report
def macro_usa_personal_spending():
"""
    US personal spending monthly rate report, data range from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_personal_spending
    https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
    :return: US personal spending monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US retail sales monthly rate report
def macro_usa_retail_sales():
"""
    US retail sales monthly rate report, data range from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_retail_sales
    https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
    :return: US retail sales monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US import price index report
def macro_usa_import_price():
"""
    US import price index report, data range from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_import_price
    https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
    :return: US import price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US export price index report
def macro_usa_export_price():
"""
    US export price index report, data range from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_export_price
    https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US export price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Labor Market - LMCI
def macro_usa_lmci():
"""
    Federal Reserve Labor Market Conditions Index (LMCI) report, data range from 20141006 to the present
    https://datacenter.jin10.com/reportType/dc_usa_lmci
    https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Federal Reserve Labor Market Conditions Index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = | pd.to_datetime(temp_se.iloc[:, 0]) | pandas.to_datetime |
import logging
import pandas as pd
import numpy as np
from binance.client import Client
_logger = logging.getLogger(__name__)
def get_metadata(sid_map):
client = Client("", "")
metadata = pd.DataFrame(
np.empty(
len(sid_map),
dtype=[
('symbol', 'str'),
('asset_name', 'str')
]
)
)
for sid, symbol in sid_map:
res = client.get_symbol_info(symbol)
metadata.loc[sid, 'symbol'] = symbol
metadata.loc[sid, 'asset_name'] = res['baseAsset']
#metadata.loc[sid, 'start_date'] = pd.to_datetime(start_session) # this is a hack
metadata['exchange'] = 'binance'
return metadata
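# Example usage (sketch; the sid/symbol pairs are hypothetical and a live Binance
# API connection is assumed):
#
#   sid_map = [(0, 'BTCUSDT'), (1, 'ETHUSDT')]
#   metadata = get_metadata(sid_map)   # DataFrame indexed by sid with symbol/asset_name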
def _get_historical_klines(
sid_map,
start_session,
end_session,
cache,
timeframe,
show_progress):
client = Client("", "")
for sid, symbol in sid_map:
key = symbol + '-' + timeframe
if key not in cache:
cache[key] = pd.DataFrame()
while cache[key].empty or cache[key].index[-1] < end_session:
cursor = start_session if cache[key].empty else cache[key].index[-1]
_res = client.get_historical_klines(symbol, timeframe, str(cursor), str(end_session), 720)
if not _res:
break
res = pd.DataFrame(_res).drop(columns=list(range(6,12)))
res.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
res['date'] = res['date'].map(lambda x: pd.Timestamp(x*1000000, tz='utc'))
res.set_index('date', inplace=True)
res.open = res.open.astype(np.float32)
res.high = res.high.astype(np.float32)
res.low = res.low.astype(np.float32)
res.close = res.close.astype(np.float32)
res.volume = res.volume.astype(np.float32)
if not cache[key].empty:
cache[key] = cache[key].drop(index=cache[key].index[-1])
cache[key] = | pd.concat([cache[key], res]) | pandas.concat |
import time
import csv
import gensim
import nltk
import numpy as np
import pandas as pd
from datetime import datetime
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from progress.bar import Bar
from scipy import spatial
from tqdm import tqdm
from nltk.stem import WordNetLemmatizer
# WordNet lemmatizer used by BiasModel.load_csv_and_preprocess when lemmatise=True
# (assumes the NLTK 'wordnet' corpus is available locally).
wordnet_lemmatizer = WordNetLemmatizer()
class EpochLogger(CallbackAny2Vec):
def __init__(self):
self.starttime = 0.0
self.epoch = 0
self.tot_epochs = 100
self.single_epoch_time = 0
def on_epoch_begin(self, model):
if self.epoch != 0:
print(f"Started epoch {self.epoch}.")
def on_epoch_end(self, model):
if self.epoch == 0:
self.starttime = time.time()
self.single_epoch_time = time.time()
else:
if self.epoch != self.tot_epochs:
print(f"Finished epoch {self.epoch} in {time.time() - self.single_epoch_time}")
self.single_epoch_time = time.time()
else:
print(f"Training finished in {time.time() - self.starttime}s")
self.epoch += 1
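# Sketch of how this callback is typically attached to a gensim model (the `sentences`
# corpus is hypothetical; version-specific Word2Vec keyword arguments are omitted):
#
#   epoch_logger = EpochLogger()
#   model = Word2Vec(sentences, callbacks=[epoch_logger])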
class BiasModel:
nltk.download('averaged_perceptron_tagger')
nltk.download('vader_lexicon')
def __init__(self, comments_document, comment_column='body', output_name='outputModel',
window=4, min_frequency=10, out_dimension=200):
self.comments_document = comments_document
self.comment_column = comment_column
self.output_name = output_name
self.window = window
self.min_frequency = min_frequency
self.out_dimension = out_dimension
self.sentiment_analyzer = SentimentIntensityAnalyzer()
@staticmethod
def calculate_centroid(model, words):
embeddings = [np.array(model[w]) for w in words if w in model]
centroid = np.zeros(len(embeddings[0]))
for e in embeddings:
centroid += e
return centroid / len(embeddings)
@staticmethod
def get_cosine_distance(embedding1, embedding2):
return spatial.distance.cosine(embedding1, embedding2)
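    # Sketch combining the two static helpers above to score the association between
    # two hypothetical word sets (assumes `model` behaves like trained gensim word
    # vectors, i.e. supports `w in model` and `model[w]`):
    #
    #   centroid_a = BiasModel.calculate_centroid(model, ['he', 'him', 'his'])
    #   centroid_b = BiasModel.calculate_centroid(model, ['she', 'her', 'hers'])
    #   bias_score = BiasModel.get_cosine_distance(centroid_a, centroid_b)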
def load_csv_and_preprocess(self, path, nrowss=None, lemmatise=False):
"""
input:
nrowss <int> : number of rows to process, leave None if all
tolower <True/False> : transform all text to lowercase
returns:
List of preprocessed sentences, i.e. the input to train
"""
print(f"Processing document: {self.comments_document}")
trpCom = pd.read_csv(self.comments_document, lineterminator='\n', nrows=nrowss)
        trpCom = trpCom.fillna(0)
documents = []
with open(path, 'a', encoding='utf-8') as file:
for i, row in enumerate(trpCom[self.comment_column]):
if i % 500000 == 0:
print(f'Processing line {i}')
for word in documents:
file.write("%s\n" % word)
documents = []
try:
pp = gensim.utils.simple_preprocess(row)
if lemmatise:
pp = [wordnet_lemmatizer.lemmatize(w, pos="n") for w in pp]
documents.append(pp)
except TypeError:
print(f'Row {i} threw a type error.')
file.close()
print(f'Wrote corpus to file: {path}.')
return documents
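    # Example usage (sketch; 'comments.csv' with a 'body' column and the 'corpus.txt'
    # output path are hypothetical):
    #
    #   bias_model = BiasModel('comments.csv', comment_column='body')
    #   documents = bias_model.load_csv_and_preprocess('corpus.txt', nrowss=100000)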
def stream_load_csv_and_preprocess(self, csv_in, csv_out, corpus_out, subsample=False, fraction=None):
if subsample and fraction is None:
print("If subsampling is enabled a fraction must be specified.")
return
f_in = open(csv_in, encoding="utf-8")
reader = csv.DictReader(f_in)
tmp_df = pd.DataFrame()
previous_day = datetime.fromtimestamp(0)
with open(corpus_out, 'a', encoding='utf-8') as file:
for row in tqdm(reader):
mask = [True if val is not None and val != "" else False for val in row.values()]
if not all(mask):
print("Empty value in row.")
continue
next_day = datetime.fromtimestamp(int(row["created"])).date()
row["body"] = ' '.join(w for w in gensim.utils.simple_preprocess(row["body"]))
if previous_day != next_day and not tmp_df.empty:
tmp_df["created"] = | pd.to_datetime(tmp_df["created"], unit='s') | pandas.to_datetime |
import spacy
import pandas as pd
from warnings import filterwarnings
from pathlib import Path
from os.path import isfile
filterwarnings('ignore')
nlp = spacy.load('en_core_web_md')
def form_similar_sequences(text: str, n: int) -> list:
tokens = nlp(text)
token_tuples = [(tokens[i].text, i) for i in range(len(tokens)) if tokens[i].pos_ in {'ADJ', 'NOUN', 'VERB'}][:5]
texts_only = [t.text for t in tokens]
def most_similar(word: str):
token = nlp.vocab[word]
queries = [w for w in token.vocab if w.is_lower == token.is_lower and w.prob >= -15]
by_similarity = sorted(queries, key=lambda w: token.similarity(w), reverse=True)[1:n]
return [w.lower_ for w in by_similarity]
modified_sentences = []
while token_tuples:
replace, position = token_tuples.pop(0)
synonyms = most_similar(replace)
subsentences = []
for ind in range(len(synonyms)):
new_sentence = ' '.join(texts_only[0:position] + [synonyms[ind]])
if position < len(tokens) - 1:
new_sentence += ' '
new_sentence += ' '.join(texts_only[position+1:])
subsentences.append(new_sentence)
if subsentences:
modified_sentences.append(subsentences)
index = 0
short_list = [text]
while modified_sentences:
index %= len(modified_sentences)
current = modified_sentences[index]
if current:
next_sentence = current.pop(0)
if next_sentence not in short_list:
short_list.append(next_sentence)
else:
del modified_sentences[index]
index += 1
if len(short_list) >= n:
break
return short_list
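# Sketch of the expected behaviour (hypothetical sentence; the exact variants depend
# on the loaded spaCy vocabulary):
#
#   variants = form_similar_sequences('I love rainy mondays', 3)
#   # -> the original sentence plus up to two variants where one ADJ/NOUN/VERB token
#   #    is swapped for a distributionally similar word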
def synonym_replacement(dataset_name: str, text_data: pd.Series, label_data: pd.Series, target_length) -> tuple:
    base_path = Path(__file__).parent
augmented_path = str(base_path / ('../datasets/' + dataset_name + "/processed_data/AugmentedData.csv"))
if isfile(augmented_path):
augmented_data = pd.read_csv(augmented_path, encoding="ISO-8859-1")
return augmented_data['clean_data'][:target_length], augmented_data['sarcasm_label'][:target_length]
duplicate_no = (target_length // len(text_data)) + 1
list_of_texts = []
list_of_labels = []
for index in range(len(text_data)):
print(index)
text = text_data[index]
label = label_data[index]
for t in form_similar_sequences(text, duplicate_no):
list_of_texts.append(t)
list_of_labels.append(label)
if len(list_of_labels) > target_length:
break
clean_data, sarcasm_labels = pd.Series(list_of_texts, name="clean_data")[:target_length], pd.Series(list_of_labels, name='sarcasm_label')[
:target_length]
new_data = | pd.concat([sarcasm_labels, clean_data], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # Generate tessellation diagram
#
# Computational notebook 01 for **Morphological tessellation as a way of partitioning space: Improving consistency in urban morphology at the plot scale**.
#
#
# <NAME>., <NAME>., <NAME>. and <NAME>. (2020) _‘Morphological tessellation as a way of partitioning space: Improving consistency in urban morphology at the plot scale’_, Computers, Environment and Urban Systems, 80, p. 101441. doi: [10.1016/j.compenvurbsys.2019.101441](http://doi.org/10.1016/j.compenvurbsys.2019.101441).
#
# Contact: <EMAIL>
#
# Date: 27/03/2020
#
# Note: notebook has been cleaned and released retroactively. It is likely that different versions of packages were initially used, but we made sure that the results remained unaltered.
#
# ---
# **Description**
#
# This notebook generates diagrams illustrating the principles of morphological tessellation used to prepare figures 2, 3 and 4. Figures were later post-processed in Illustrator.
# In[1]:
import geopandas as gpd
import momepy as mm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy as sp
import pandas as pd
from scipy.spatial import Voronoi, voronoi_plot_2d
from random import random
import shapely
from shapely.wkt import loads
from shapely.geometry import Polygon, Point
from tqdm import tqdm
# In[2]:
gpd.__version__, mm.__version__, matplotlib.__version__, sns.__version__, np.__version__, sp.__version__, shapely.__version__
# In[3]:
polys = [
Polygon([(10, 10), (20, 10), (20, 20), (10, 20)]),
Polygon([(30, 10), (40, 10), (40, 20), (30, 20)]),
Polygon([(10, 30), (20, 30), (20, 40), (10, 40)]),
Polygon([(20, 30), (30, 30), (30, 40), (20, 40)]),
Polygon([(30, 30), (40, 30), (40, 40), (30, 40)]),
]
# In[4]:
gdf = gpd.GeoDataFrame(geometry=polys)
gdf['uID'] = mm.unique_id(gdf)
retain = gdf.copy()
# In[5]:
sns.set_style('whitegrid')
sns.set_context(context='paper', font_scale=1, rc=None)
# In[6]:
f, ax = plt.subplots(figsize=(10, 10))
gdf.plot(ax=ax)
sns.despine(left=True, bottom=True)
#plt.savefig('tesdiag_0.svg')
# In[7]:
limit = Polygon([(0, 0), (50, 0), (50, 50), (0, 50)])
# In[8]:
f, ax = plt.subplots(figsize=(10, 10))
gdf.plot(ax=ax)
gpd.GeoDataFrame(geometry=[limit.boundary]).plot(ax=ax, edgecolor='red')
sns.despine(left=True, bottom=True)
#plt.savefig('tesdiag_1.svg')
# In[9]:
shrink = 1
# In[10]:
polys = ["Polygon", "MultiPolygon"]
print("Bufferring geometry...")
gdf["geometry"] = gdf.geometry.apply(
lambda g: g.buffer(-shrink, cap_style=2, join_style=2) if g.type in polys else g
)
# In[11]:
f, ax = plt.subplots(figsize=(10, 10))
gdf.plot(ax=ax)
sns.despine(left=True, bottom=True)
#plt.savefig('tesdiag_2.svg')
# In[12]:
segment = 2
# In[13]:
def _densify(geom, segment):
"""
Returns densified geometry with segments no longer than `segment`.
"""
from osgeo import ogr
poly = geom
wkt = geom.wkt # shapely Polygon to wkt
geom = ogr.CreateGeometryFromWkt(wkt) # create ogr geometry
geom.Segmentize(segment) # densify geometry by set metres
geom.CloseRings() # fix for GDAL 2.4.1 bug
wkt2 = geom.ExportToWkt() # ogr geometry to wkt
new = loads(wkt2) # wkt to shapely Polygon
return new
gdf["geometry"] = gdf["geometry"].apply(_densify, segment=segment)
# In[14]:
def _point_array(objects, unique_id):
"""
Returns lists of points and ids based on geometry and unique_id.
"""
points = []
ids = []
for idx, row in tqdm(objects.iterrows(), total=objects.shape[0]):
if row["geometry"].type in ["Polygon", "MultiPolygon"]:
poly_ext = row["geometry"].boundary
else:
poly_ext = row["geometry"]
if poly_ext is not None:
if poly_ext.type == "MultiLineString":
for line in poly_ext:
point_coords = line.coords
row_array = np.array(point_coords[:-1]).tolist()
for i, a in enumerate(row_array):
points.append(row_array[i])
ids.append(row[unique_id])
elif poly_ext.type == "LineString":
point_coords = poly_ext.coords
row_array = np.array(point_coords[:-1]).tolist()
for i, a in enumerate(row_array):
points.append(row_array[i])
ids.append(row[unique_id])
else:
raise Exception("Boundary type is {}".format(poly_ext.type))
return points, ids
points, ids = _point_array(gdf, 'uID')
# In[15]:
pts = [Point(p) for p in points]
# In[16]:
pts = gpd.GeoDataFrame(geometry=pts)
# In[17]:
f, ax = plt.subplots(figsize=(10, 10))
pts.plot(ax=ax)
sns.despine(left=True, bottom=True)
#plt.savefig('tesdiag_3.svg')
# In[18]:
hull = limit.buffer(100)
hull = _densify(hull, 10)
hull_array = np.array(hull.boundary.coords).tolist()
for i, a in enumerate(hull_array):
points.append(hull_array[i])
ids.append(-1)
# In[19]:
voronoi_diagram = Voronoi(np.array(points))
# In[20]:
def _regions(voronoi_diagram, unique_id, ids, crs):
"""
Generate GeoDataFrame of Voronoi regions from scipy.spatial.Voronoi.
"""
# generate DataFrame of results
regions = pd.DataFrame()
import os
from getpass import getpass
import pandas as pd
import numpy as np
import lib.galaxy_utilities as gu
from panoptes_client import Panoptes, Project, Subject
def find_duplicates():
Panoptes.connect(username='tingard', password=getpass())
gzb_project = Project.find(slug='tingard/galaxy-builder')
subject_sets = []
for set in gzb_project.links.subject_sets:
subject_sets.append(list(set.subjects))
subjects = [j for i in subject_sets for j in i]
subject_set_ids = [[np.int64(j.id) for j in i] for i in subject_sets]
ids = [int(i.id) for i in subjects]
dr7objids = [np.int64(i.metadata.get('SDSS dr7 id', False)) for i in subjects]
pairings = sorted(zip(ids, dr7objids), key=lambda i: i[0])
df = pd.DataFrame(pairings, columns=('subject_id', 'dr7objid'))
import glob
import tempfile
import pandas as pd
import pytest
from pandas.api.types import is_integer_dtype
from pandas.testing import assert_frame_equal, assert_series_equal
import grblogtools as glt
@pytest.fixture(scope="module")
def glass4_summary():
"""Summary data from API call."""
return glt.parse("data/*.log").summary()
@pytest.fixture(scope="module")
def glass4_progress():
"""Progress data from API call."""
return {
"norel": glt.parse("data/*.log").progress("norel"),
"rootlp": glt.parse("data/*.log").progress("rootlp"),
"nodelog": glt.parse("data/*.log").progress("nodelog"),
}
@pytest.fixture(scope="module")
def testlog_summary():
return glt.parse("tests/assets/*.log").summary()
@pytest.fixture(scope="module")
def testlog_progress():
return {
"norel": glt.parse("tests/assets/*.log").progress("norel"),
"rootlp": glt.parse("tests/assets/*.log").progress("rootlp"),
"nodelog": glt.parse("tests/assets/*.log").progress("nodelog"),
}
@pytest.fixture(scope="module")
def merged_log():
with tempfile.NamedTemporaryFile("w") as fp:
for path in sorted(glob.glob("data/912-glass4-*.log")):
with open(path) as infile:
fp.writelines(infile.readlines())
fp.flush()
yield fp.name
def test_merged_log(merged_log):
summary = glt.parse(merged_log).summary()
result = summary[["Seed", "Runtime", "LogFilePath", "LogNumber"]]
expected = pd.DataFrame(
[
{"Seed": 0, "Runtime": 35.66, "LogFilePath": merged_log, "LogNumber": 1},
{"Seed": 1, "Runtime": 42.79, "LogFilePath": merged_log, "LogNumber": 2},
{"Seed": 2, "Runtime": 11.37, "LogFilePath": merged_log, "LogNumber": 3},
]
)
assert_frame_equal(result, expected)
def test_summary(testlog_summary):
assert len(testlog_summary) == 7
assert set(testlog_summary.columns).issuperset(
{"Status", "ObjVal", "ReadingTime", "RelaxObj"}
)
def test_progress(testlog_progress):
assert len(testlog_progress) == 3
assert len(testlog_progress["norel"]) == 15
assert set(testlog_progress["norel"].columns).issuperset(
{"Time", "BestBd", "Incumbent"}
)
assert len(testlog_progress["rootlp"]) == 409
assert set(testlog_progress["rootlp"].columns).issuperset(
{"Iteration", "PInf", "DInf", "PObj", "DObj"}
)
assert len(testlog_progress["nodelog"]) == 133
assert set(testlog_progress["nodelog"].columns).issuperset(
{"Depth", "IntInf", "Incumbent", "BestBd", "ItPerNode", "ModelFile", "Version"}
)
def test_summary_glass4(glass4_summary):
assert len(glass4_summary) == 63
assert set(glass4_summary.columns).issuperset(
{"Status", "ObjVal", "ReadingTime", "RelaxObj", "Seed"}
)
def test_progress_glass4(glass4_progress):
assert len(glass4_progress) == 3
assert len(glass4_progress["norel"]) == 0
assert len(glass4_progress["rootlp"]) == 0
assert set(glass4_progress["nodelog"].columns).issuperset(
{"Depth", "IntInf", "Incumbent", "BestBd", "ItPerNode"}
)
def test_logfile(glass4_summary):
logfiles = glass4_summary["LogFilePath"]
assert len(logfiles.unique()) == len(logfiles)
assert logfiles.str.startswith("data/").all()
assert logfiles.str.endswith(".log").all()
assert_series_equal(
glass4_summary["LogFile (Parameter)"].apply(lambda l: "data/" + l),
logfiles,
check_names=False,
)
# log names are stripped of the model name and seed
log = glass4_summary["Log"]
assert log.str.startswith("912").all()
assert not log.str.contains("glass4").any()
assert not log.str.endswith("-").any()
assert len(log.unique()) == 21 # different seeds get same label
def test_modelfile(glass4_summary):
modelfiles = glass4_summary["ModelFilePath"]
assert len(modelfiles.unique()) == 1
assert glass4_summary["ModelFile"].eq("glass4").all()
assert glass4_summary["Model"].eq("glass4").all()
def test_parameters(glass4_summary):
seeds = glass4_summary["Seed"]
assert is_integer_dtype(seeds)
import csv as cs
import os
from pathlib import Path
from app import app
import pandas as pd
from app.routes import app
def fileread(filename):
Time_f = 's'
# Open csv file
File_name = str(filename)
file_to_open = os.path.join(app.config['USER_FOLDER'],File_name)
csvFile = open(file_to_open, "r")
reader = cs.reader(csvFile)
Columns = []
Data1 = pd.DataFrame(columns=Columns)
import numpy as np
import pandas as pd
df = pd.DataFrame(
[
["A", "group_1", pd.Timestamp(2019, 1, 1, 9)],
["B", "group_1", pd.Timestamp(2019, 1, 2, 9)],
["C", "group_2", pd.Timestamp(2019, 1, 3, 9)],
["D", "group_1", pd.Timestamp(2019, 1, 6, 9)],
["E", "group_1", pd.Timestamp(2019, 1, 7, 9)],
["F", "group_1", pd.Timestamp(2019, 1, 10, 9)],
["G", "group_2", pd.Timestamp(2019, 1, 20, 9)],
["H", "group_1", pd.Timestamp(2019, 4, 8, 9)],
],
columns=["index", "group", "eventTime"],
).set_index("index")
groups = df.groupby("group")
df["count_to_date"] = groups.cumcount()
rolling_groups = groups.rolling("10d", on="eventTime")
group_size = rolling_groups.apply(lambda df: df.shape[0])
print(group_size)
index = pd.MultiIndex.from_tuples(
[
("group_1", "A"),
("group_1", "B"),
("group_1", "D"),
("group_1", "E"),
("group_1", "F"),
("group_1", "H"),
("group_2", "C"),
("group_2", "G"),
],
names=["group", "index"],
)
columns = pd.Index(["eventTime", "count_to_date"], dtype="object")
values = np.array(
[
[pd.Timestamp("2019-01-01 09:00:00"), 1.0],
[pd.Timestamp("2019-01-02 09:00:00"), 2.0],
[pd.Timestamp("2019-01-06 09:00:00"), 3.0],
[pd.Timestamp("2019-01-07 09:00:00"), 4.0],
[pd.Timestamp("2019-01-10 09:00:00"), 5.0],
[pd.Timestamp("2019-04-08 09:00:00"),
from os.path import join, realpath, dirname, exists, basename
from os import makedirs
import pandas as pd
from pandas import CategoricalDtype
from tqdm.auto import tqdm
from .coalitions import coalitions
def generate_number_of_tweets_per_day(df, output_dir):
# exclude retweets
df = df[df.user_rt.isnull()]
value_counts = df['date'].value_counts()
df_value_counts = pd.DataFrame(value_counts)
df_value_counts = df_value_counts.reset_index()
df_value_counts.columns = ['date', 'number_of_tweets']
df_value_counts = df_value_counts.sort_values(by=['date'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "number_of_tweets_per_day.csv")
df_value_counts.to_csv(path, index=False)
def generate_number_of_retweets_per_day(df, output_dir):
df = df.dropna(subset=['user_rt'])
value_counts = df['date'].value_counts()
df_value_counts = pd.DataFrame(value_counts)
df_value_counts = df_value_counts.reset_index()
df_value_counts.columns = ['date', 'number_of_retweets']
df_value_counts = df_value_counts.sort_values(by=['date'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "number_of_retweets_per_day.csv")
df_value_counts.to_csv(path, index=False)
def generate_number_of_retweets_for_users_tweets_per_day(df, output_dir):
df_retweets_counts = df.groupby("date")['retweets_count'].sum().reset_index()
df_retweets_counts = df_retweets_counts.sort_values(by=['date'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "number_of_retweets_for_users_tweets_per_day.csv")
df_retweets_counts.to_csv(path, index=False)
def generate_number_of_likes_for_users_tweets_per_day(df, output_dir):
df_likes_counts = df.groupby("date")['likes_count'].sum().reset_index()
df_likes_counts = df_likes_counts.sort_values(by=['date'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "number_of_likes_for_users_tweets_per_day.csv")
df_likes_counts.to_csv(path, index=False)
def generate_tweeting_activity_distribution_in_a_day(df, output_dir):
# exclude retweets
df = df[df.user_rt.isnull()]
value_counts = pd.to_datetime(df['time']).dt.hour.value_counts(dropna=True)
df_value_counts = pd.DataFrame(value_counts)
df_value_counts = df_value_counts.reset_index()
df_value_counts.columns = ['hour', 'number_of_tweets']
df_value_counts = df_value_counts.sort_values(by=['hour'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "tweeting_activity_distribution_in_a_day.csv")
df_value_counts.to_csv(path, index=False)
def generate_tweeting_activity_distribution_in_a_week(df, output_dir):
# exclude retweets
df = df[df.user_rt.isnull()]
value_counts = pd.to_datetime(df['date']).dt.day_name().value_counts(dropna=True)
df_value_counts = pd.DataFrame(value_counts)
df_value_counts = df_value_counts.reset_index()
df_value_counts.columns = ['week_day', 'number_of_tweets']
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
cat_type = CategoricalDtype(categories=cats, ordered=True)
df_value_counts['week_day'] = df_value_counts['week_day'].astype(cat_type)
df_value_counts = df_value_counts.sort_values(by=['week_day'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "tweeting_activity_distribution_in_a_week.csv")
df_value_counts.to_csv(path, index=False)
def generate_retweeting_activity_distribution_in_a_day(df, output_dir):
df = df.dropna(subset=['user_rt'])
value_counts = pd.to_datetime(df['time']).dt.hour.value_counts(dropna=True)
df_value_counts = pd.DataFrame(value_counts)
df_value_counts = df_value_counts.reset_index()
df_value_counts.columns = ['hour', 'number_of_retweets']
df_value_counts = df_value_counts.sort_values(by=['hour'])
if not exists(output_dir):
makedirs(output_dir)
path = join(output_dir, "retweeting_activity_distribution_in_a_day.csv")
df_value_counts.to_csv(path, index=False)
def generate_retweeting_activity_distribution_in_a_week(df, output_dir):
df = df.dropna(subset=['user_rt'])
value_counts = pd.to_datetime(df['date'])
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
import pandas as pd
import numpy as np
from sklearn.base import is_regressor
from collections.abc import Sequence
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
def check_estimator(estimator):
"""Check estimator"""
if not is_regressor(estimator):
raise Exception(f"Estimator '{type(estimator).__name__}' did not pass an a regressor")
# Required methods of an regression model
REQUIRED_METHODS = ["get_params", "set_params", "fit", "predict", "score"]
# Check if class has required methods by using getattr() to get the attribute, and callable() to verify it is a method
for method in REQUIRED_METHODS:
method_attribute = getattr(estimator, method, None)
if not callable(method_attribute):
raise Exception(f"Estimator '{type(estimator).__name__}' does not contain the method '{method}'")
def check_param_grid(param_grid):
if hasattr(param_grid, "items"):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if isinstance(v, str) or not isinstance(v, (np.ndarray, Sequence)):
raise ValueError(
"Parameter grid for parameter ({0}) needs to"
" be a list or numpy array, but got ({1})."
" Single values need to be wrapped in a list"
" with one element.".format(name, type(v))
)
if len(v) == 0:
raise ValueError(
"Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name)
)
def check_X(X):
# Convert to dataframe if not already converted
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
# Check for numeric dtype
if not all(X.dtypes.map(pd.api.types.is_numeric_dtype)):
raise Exception("X must be only numeric dtype")
# Check for missing
if X.isnull().values.any():
raise Exception("X contains missing inputs")
# Convert back to np.array
X = np.array(X)
return X
def check_Y(Y):
# Convert to series if not already converted
if not isinstance(Y, pd.Series):
Y = pd.Series(Y)
# Check for numeric dtype
if not pd.api.types.is_numeric_dtype(Y.dtype):
from functools import partial
import pandas as pd
# the following functions should be easy:
# a function that just gets all the component collections it requires.
# useful for pandas.
# a function that gets the component collections, filtered by the
# intersection of entity_id. Useful for Python, not useful for pandas.
# a function that gets a single instance of each component it requests.
# useful for Python update functions, not useful for pandas.
# a pure function that gets all component collections it requires, and
# must return new copies of those component collections, including having
# done any add and removal.
# a pure function that gets the component collections, filtered by the
# intersection of entity_ids, and returns collections with updated components,
# plus component_id / entity_ids to remove, plus component / entity_ids to add
# (a minimal sketch of this shape follows below).
# Useful for Pandas and Python.
# a pure function that *returns* a new single instance of each component it
# requests. This is going to be stored in the collection.
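# Illustrative sketch (not part of the original module): one possible shape for the
# pure, intersection-filtered update function described in the comments above. The
# component names ('position', 'velocity'), the one-DataFrame-per-component layout
# indexed by entity_id, and the shared coordinate columns are assumptions made only
# for this example.
def move_system(position_df, velocity_df):
    # restrict the update to entities that carry both components
    shared_ids = position_df.index.intersection(velocity_df.index)
    # pure update: build a new collection instead of mutating the inputs
    # (assumes both frames expose the same coordinate columns, e.g. 'x' and 'y')
    updated_positions = position_df.loc[shared_ids] + velocity_df.loc[shared_ids]
    to_add = []     # (entity_id, component) pairs to create
    to_remove = []  # entity_ids whose position component should be dropped
    return updated_positions, to_add, to_remove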
class DictContainer(dict):
"""Component container backed by dict.
This is in fact a Python dict.
"""
def value(self):
"""Get the underlying container object.
"""
return self
class DataFrameContainer:
"""Component container backed by pandas DataFrame.
This can give a performance boost when you have a large
amount of components and you use vectorized functionality to query
and update components.
adds and removes are buffered for efficiency, only once
the container is accessed for its value is the buffer flushed;
typically this happens before the next system runs that requires
this component container.
"""
def __init__(self):
self.df = pd.DataFrame([])
self.to_add_entity_ids = []
self.to_add_components = []
self.to_remove_entity_ids = []
def __setitem__(self, entity_id, component):
self.to_add_entity_ids.append(entity_id)
self.to_add_components.append(component)
def __delitem__(self, entity_id):
self.to_remove_entity_ids.append(entity_id)
def _complete(self):
self._complete_remove()
self._complete_add()
def _complete_add(self):
if not self.to_add_entity_ids:
return
add_df = self._create(self.to_add_components,
self.to_add_entity_ids)
self.df = pd.concat([self.df, add_df])
self.to_add_entity_ids = []
self.to_add_components = []
def _complete_remove(self):
if not self.to_remove_entity_ids:
return
self.df = self.df.drop(self.to_remove_entity_ids)
def _create(self, components, entity_ids):
return pd.DataFrame(components, index=entity_ids)
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
"""
An improved version of your marketsim code that accepts
a "trades" data frame (instead of a file).
More info on the trades data frame below.
"""
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
import pandas as pd
import numpy as np
import datetime as dt
from util import get_data, plot_data
def compute_portvals(df_orders=None, orders_file=None,
start_val=1000000, commission=9.95, impact=0.005,
start_date=None, end_date=None):
# NOTE: orders_file may be a string, or it may be a file object.
# 1. Import Order dataframe
# df_orders = pd.read_csv(orders_file, index_col='Date', parse_dates=True, na_values=['nan'])
# print("df_orders")
# print(df_orders.head())
# 2. Sort order file by dates (ascending)
df_orders = df_orders.sort_index(ascending=1)
# print("df_orders")
# print(df_orders)
# print(df_orders.shape)
# 3. Get symbols for the portfolio
symbols = df_orders["Symbol"].unique().tolist()
# print("symbols")
# print(symbols)
# print(type(symbols))
# 4. Get date range.
# start_date = min(df_orders.index)
# end_date = max(df_orders.index)
# print("start_date", start_date)
# print(type(start_date))
# print("end_date", end_date)
# print(type(end_date))
# 5. Get df_prices using adjusted closing price, add cash column at last (all 1.0)
df_prices = get_data(symbols, pd.date_range(start_date, end_date))
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
"""
Test related to MultiIndex
"""
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core.column import as_column
from cudf.core.index import as_index
from cudf.tests.utils import assert_eq, assert_neq
def test_multiindex_levels_codes_validation():
levels = [["a", "b"], ["c", "d"]]
# Codes not a sequence of sequences
with pytest.raises(TypeError):
pd.MultiIndex(levels, [0, 1])
with pytest.raises(TypeError):
cudf.MultiIndex(levels, [0, 1])
# Codes don't match levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0], [1], [1]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0], [1], [1]])
# Largest code greater than number of levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0, 2]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0, 2]])
# Unequal code lengths
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0]])
# Didn't pass levels and codes
with pytest.raises(TypeError):
pd.MultiIndex()
with pytest.raises(TypeError):
cudf.MultiIndex()
# Didn't pass non zero levels and codes
with pytest.raises(ValueError):
pd.MultiIndex([], [])
with pytest.raises(ValueError):
cudf.MultiIndex([], [])
def test_multiindex_construction():
levels = [["a", "b"], ["c", "d"]]
codes = [[0, 1], [1, 0]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
pmi = pd.MultiIndex(levels, codes)
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 02:13:48 2016
@author: Евгений
"""
import pandas as pd
from folders import ParsedCSV
from row_parser import get_colname_dtypes
file = ParsedCSV(2015).filepath()
chunksize = 100*1000
chunks = pd.read_csv(file, dtype=get_colname_dtypes(), chunksize=chunksize, iterator=True)
def nlargest(df0, n=100):
max_vals = sorted(df0.ta.tolist(), reverse=True)[:n]
ix = df0['ta'].isin(max_vals)
return df0[ix]
def process_chunks(chunks):
result = pd.DataFrame()
"""Tests for irradiance quality control functions."""
from datetime import datetime
import pytz
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from pvanalytics.quality import irradiance
@pytest.fixture
def irradiance_qcrad():
"""Synthetic irradiance data and its expected quality flags.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
output = pd.DataFrame(
columns=['ghi', 'dhi', 'dni', 'solar_zenith', 'dni_extra',
'ghi_limit_flag', 'dhi_limit_flag', 'dni_limit_flag',
'consistent_components', 'diffuse_ratio_limit'],
data=np.array([[-100, 100, 100, 30, 1370, 0, 1, 1, 0, 0],
[100, -100, 100, 30, 1370, 1, 0, 1, 0, 0],
[100, 100, -100, 30, 1370, 1, 1, 0, 0, 1],
[1000, 100, 900, 0, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 15, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 60, 1370, 0, 1, 1, 0, 1],
[1000, 300, 850, 80, 1370, 0, 0, 1, 0, 1],
[1000, 500, 800, 90, 1370, 0, 0, 1, 0, 1],
[500, 100, 1100, 0, 1370, 1, 1, 1, 0, 1],
[1000, 300, 1200, 0, 1370, 1, 1, 1, 0, 1],
[500, 600, 100, 60, 1370, 1, 1, 1, 0, 0],
[500, 600, 400, 80, 1370, 0, 0, 1, 0, 0],
[500, 500, 300, 80, 1370, 0, 0, 1, 1, 1],
[0, 0, 0, 93, 1370, 1, 1, 1, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'float64', 'float64',
'bool', 'bool', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_ghi_limits_qcrad(irradiance_qcrad):
"""Test that QCRad identifies out of bounds GHI values.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
expected = irradiance_qcrad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out = irradiance.check_ghi_limits_qcrad(expected['ghi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(ghi_out, ghi_out_expected, check_names=False)
def test_check_dhi_limits_qcrad(irradiance_qcrad):
"""Test that QCRad identifies out of bounds DHI values.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
expected = irradiance_qcrad
dhi_out_expected = expected['dhi_limit_flag']
dhi_out = irradiance.check_dhi_limits_qcrad(expected['dhi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dhi_out, dhi_out_expected, check_names=False)
def test_check_dni_limits_qcrad(irradiance_qcrad):
"""Test that QCRad identifies out of bounds DNI values.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
expected = irradiance_qcrad
dni_out_expected = expected['dni_limit_flag']
dni_out = irradiance.check_dni_limits_qcrad(expected['dni'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dni_out, dni_out_expected, check_names=False)
def test_check_irradiance_limits_qcrad(irradiance_qcrad):
"""Test different input combinations to check_irradiance_limits_qcrad.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
expected = irradiance_qcrad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out, dhi_out, dni_out = irradiance.check_irradiance_limits_qcrad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'])
assert_series_equal(ghi_out, ghi_out_expected, check_names=False)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from hash import *
class simulation:
def __init__(self, length=12096, mu=0, sigma=0.001117728,
b_target=10, block_reward=12.5, hash_ubd=55,
hash_slope=3, hash_center=1.5, prev_data=pd.DataFrame(),
T_BCH=144, T_BTC=2016, init_price=5400,
init_winning_rate=0.00003):
'''
Parameters
----------
length: time length of simulation
length = the number of blocks generated in one simulation.
A new block is generated in 10 minutes in expectation;
12096 blocks are generated in three months in expectation.
mu: average of the brownian motion
sigma: standard deviation of the brownian motion
b_target: target block time (min) (default: 10 min)
\bar{B}
block_reward:
the amount of cryptocurrency the miner receives when he
adds a block. (default: 12.5)
hash_ubd: the upper bound of global hash rate.
hash_slope, hash_center:
the parameters that affect the shape of the hash supply function
prev_data:
a pandas dataframe containing (i) prices, (ii) winning rates,
(iii) hash rates, and (iv) block times.
The number of rows should coincide with T_BCH.
T_BCH: the length of the time window used for DAA of BCH.
T_BTC: the length of the time window used for DAA of BTC.
init_price: the initial price.
init_winning_rate: the initial winning rate.
Attributes
----------
block_times
prices
winning_rates
hash_rates
optimal_winning_rates
expected_returns
Notes
-----
* As for BTC and BCH, b_target is set to be 10 minutes.
'''
# params
self.mu = mu
self.sigma = sigma
self.b_target = b_target
self.length = length
self.block_reward = block_reward
self.hash_ubd = hash_ubd
self.hash_slope = hash_slope
self.hash_center = hash_center
self.T_BCH = T_BCH
self.T_BTC = T_BTC
if prev_data.empty == True:
self.prev_prices = np.ones(T_BCH) * init_price
self.prev_block_times = np.ones(T_BCH) * b_target
self.prev_winning_rates = np.ones(T_BCH) * init_winning_rate
else:
self.prev_prices = prev_data['prices']
self.prev_block_times = prev_data['block_times']
self.prev_winning_rates = prev_data['winning_rates']
def sim_DAA_1(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
init_height=551443, presim_length=2016, ubd_param=3):
'''
Conduct a simulation using DAA-1 as its DAA.
DAA-1 is based on the DAA used by BTC.
Parameters
----------
prices : exogenously given. price[t] is the price at time 10*t
exprvs : exogenously given; used for computing block times.
opt_w : the optimal winning rates, computed in advance.
init_height :
the height of the block that is created first
in the simulation. (default: 551443)
presim_length :
the length of periods contained in prev_data.
(Real data used for the pre-simulation period.)
See also __init__.
ubd_param :
determines the maximum number of iterations
See also _initialization.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted
every self.T_BTC periods. In reality, BTC lets T_BTC = 2016.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 months
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
if (init_height + t)%self.T_BTC == 0:
self.diff_adjust_BTC(current_period=t)
else:
break
self._postprocessing(period)
return None
def sim_DAA_2(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
presim_length=2016, ubd_param=3):
'''
Conduct a simulation using DAA-2 as its DAA.
DAA-2 is based on the DAA used by BCH.
Parameters
----------
prices: see sim_BTC.
exprvs: see sim_BTC.
presim_length: see sim_BTC.
ubd_param: see sim_BTC.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted every period.
At each adjustment, the last T_BCH blocks are taken into account.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 months
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
## different from that of BTC in that
## difficulty adjustment is conducted every period.
self.diff_adjust_BCH(current_period=t)
else:
break
self._postprocessing(period)
return None
def sim_DAA_asert(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
presim_length=2016, ubd_param=3, half_life=2880):
'''
Conduct a simulation using an ASERT-style algorithm as its DAA.
The difficulty is adjusted exponentially, based on the deviation of
the last block time from the target (see diff_adjust_asert).
Parameters
----------
prices: see sim_BTC.
exprvs: see sim_BTC.
presim_length: see sim_BTC.
ubd_param: see sim_BTC.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted every period.
At each adjustment, W(t) is rescaled by exp((B(t) - b_target) / half_life),
so only the last block time is taken into account.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 months
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
## different from that of BTC in that
## difficulty adjustment is conducted every period.
self.diff_adjust_asert(current_period=t, half_life=half_life)
else:
break
self._postprocessing(period)
return None
def sim_DAA_0(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
init_height=551443, presim_length=2016, ubd_param=3):
'''
Conduct a simulation where the difficulty is always adjusted
to the optimal level. (imaginary DAA)
Parameters
----------
prices : exogenously given. price[t] is the price at time 10*t
exprvs : exogenously given; used for computing block times.
opt_w : the optimal winning rates, computed in advance.
init_height :
the height of the block that is created first
in the simulation. (default: 551443)
presim_length :
the length of periods contained in prev_data.
(Real data used for the pre-simulation period.)
See also __init__.
ubd_param :
determines the maximum number of iterations
See also _initialization.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is set to the optimal level W^*(t)
in every period; no real-world DAA corresponds to this rule.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 months
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# W^*(t)
## W(t) = W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
self.winning_rates[t] = self.optimal_winning_rates[t]
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
else:
break
self._postprocessing(period)
return None
def compute_price(self, current_period, current_time, prices):
'''
Compute the price at the time when the (t+1)-th block is created:
compute S(t+1) using price data via linear interpolation.
prices contains the price date recorded every 10 minutes.
'''
time_left = int(current_time//self.b_target)
time_right = time_left + 1
self.prices[current_period+1] = \
prices[time_left] + (prices[time_right] - prices[time_left]) * \
((current_time - time_left*self.b_target)/self.b_target)
return None
def diff_adjust_BTC(self, current_period):
'''
Used by sim_DAA_1.
Modify self.winning_rates in place.
'''
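# DAA-1 update: W(t+1) = W(t) * sum(B over the last T_BTC blocks) / (T_BTC * b_target),
# i.e. W is rescaled by the ratio of the realized to the target average block time
# and then held fixed for the next T_BTC periods.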
multiplier = \
(self.block_times[current_period-self.T_BTC+1:\
current_period+1].sum() / (self.T_BTC * self.b_target))
self.winning_rates[current_period+1:current_period+self.T_BTC+1] = \
self.winning_rates[current_period] * multiplier
return None
def diff_adjust_BCH(self, current_period):
'''
Used by sim_DAA_2.
Modify self.winning_rates in place.
'''
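# DAA-2 update over the last T_BCH periods:
# W(t+1) = (sum of block times / b_target) / (sum of 1/W)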
# the term related to B(t)
block_term = \
(self.block_times[current_period-self.T_BCH+1: \
current_period+1].sum() / self.b_target)
# the term related to W(t)
temp = np.ones(self.T_BCH)
w_inverses = temp / (self.winning_rates[current_period-self.T_BCH+1: \
current_period+1])
winning_prob_term = 1 / w_inverses.sum()
# update W(t)
self.winning_rates[current_period+1] = \
block_term * winning_prob_term
return None
def diff_adjust_asert(self, current_period, half_life=2880):
'''
Used by sim_DAA_asert.
Modify self.winning_rates in place.
'''
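# ASERT update: W(t+1) = W(t) * exp((B(t) - b_target) / half_life)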
temp = (self.block_times[current_period] - self.b_target)/half_life
# update W(t)
self.winning_rates[current_period+1] = \
self.winning_rates[current_period] * np.exp(temp)
return None
def hash_supply(self, current_period):
'''
Compute hash supply in current period (EH)
'''
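# H(t) = hash_ubd * sigmoid(hash_slope * (S(t) * W(t) * block_reward - hash_center))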
current_exp_reward = \
(self.prices[current_period] * self.winning_rates[current_period]
* self.block_reward)
return self.hash_ubd * \
self._sigmoid(self.hash_slope *
(current_exp_reward - self.hash_center))
def _sigmoid(self, x):
sigmoid_range = 34.538776394910684
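# beyond |x| ~= 34.54 (= -log(1e-15)) the sigmoid is numerically 0 or 1,
# so clamp the output and avoid overflow in exp()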
if x <= -sigmoid_range:
return 1e-15
if x >= sigmoid_range:
return 1.0 - 1e-15
return 1.0 / (1.0 + np.exp(-x))
def _initialization(self, ubd_param, presim_length=2016):
# the number of iteration cannot exceeds self.length * self.ubd_param
sim_length_ubd = self.length * ubd_param
self.prices = np.zeros((sim_length_ubd,)) # S(t)
self.winning_rates = np.zeros((sim_length_ubd,)) # W(t)
self.block_times = np.zeros((sim_length_ubd,)) # B(t)
self.hash_rates = np.zeros((sim_length_ubd,)) #H(t)
self.optimal_winning_rates = np.zeros((sim_length_ubd,)) #W^*(t)
self.expected_rewards = np.zeros((sim_length_ubd,)) #R(t)
# add pre-simulation periods
self.prices = np.hstack([self.prev_prices, self.prices])
self.block_times = \
np.hstack([self.prev_block_times, self.block_times])
self.winning_rates = \
np.hstack([self.prev_winning_rates, self.winning_rates])
## for BTC, set the winning rates
self.winning_rates[presim_length:presim_length+self.T_BTC] = \
self.winning_rates[presim_length-1]
## hash rates in pre-simulation periods will not be used
## The same is true of opt_win_rate and exp_returns
_ = np.zeros(presim_length) + self.hash_supply(presim_length-1) # may be redundant
self.hash_rates = np.hstack([_, self.hash_rates])
_ = np.zeros(presim_length)
self.optimal_winning_rates = np.hstack([_, self.optimal_winning_rates])
self.expected_rewards = np.hstack([_, self.expected_rewards])
return None
def _postprocessing(self, period, presim_length=2016):
self.block_times = self.block_times[presim_length:period]
self.prices = self.prices[presim_length:period]
self.winning_rates = self.winning_rates[presim_length:period]
self.hash_rates = self.hash_rates[presim_length:period]
self.optimal_winning_rates =\
self.optimal_winning_rates[presim_length:period]
self.expected_rewards = self.expected_rewards[presim_length:period]
return None
# Functions
def generate_simulation_data(num_iter=3, price_shock=0, T=None,
opt_w=pd.DataFrame(), prev_data=pd.DataFrame(),
dir_sim='/Volumes/Data/research/BDA/simulation/'):
'''
Notes
-----
    num_iter is the number of observations.
The price data 'sim_prices_ps={}_5000obs.csv'.format(price_shock) should
be created in advance.
If T is specified, T_BTC <- T and T_BCH <- T.
'''
df_exprvs = pd.read_csv(dir_sim+'sim_exprvs_5000obs.csv')
df_price = pd.read_csv(dir_sim+'sim_prices_ps={}_5000obs.csv'\
.format(price_shock))
df_opt_w = pd.read_csv(dir_sim + 'opt_w.csv', index_col=0)
path = '../data/BTCdata_presim.csv'
prev_data = pd.read_csv(path)
prev_data['time'] = pd.to_datetime(prev_data['time'])
prev_data = prev_data.rename(columns={'blocktime': 'block_times', 'price': 'prices', 'probability of success /Eh': 'winning_rates'})
df_DAA_1_blocktime = pd.DataFrame()
df_DAA_1_hashrate = pd.DataFrame()
df_DAA_1_winrate = pd.DataFrame()
df_DAA_1_optwinrate = pd.DataFrame()
df_DAA_1_expreward = pd.DataFrame()
df_DAA_2_blocktime = pd.DataFrame()
df_DAA_2_hashrate = pd.DataFrame()
df_DAA_2_winrate = pd.DataFrame()
df_DAA_2_optwinrate = pd.DataFrame()
df_DAA_2_expreward = pd.DataFrame()
df_DAA_0_blocktime = pd.DataFrame()
df_DAA_0_hashrate = pd.DataFrame()
df_DAA_0_winrate = pd.DataFrame()
df_DAA_0_optwinrate = pd.DataFrame()
df_DAA_0_expreward = pd.DataFrame()
if T:
T_BTC = T
T_BCH = T
else:
T_BTC = 2016
T_BCH = 144
sim = simulation(prev_data=prev_data, T_BTC=T_BTC, T_BCH=T_BCH)
for iter in range(num_iter):
prices = df_price.loc[:, 'iter_{}'.format(iter)]
exprvs = df_exprvs.loc[:, 'iter_{}'.format(iter)]
# DAA-1
_blocktime = pd.DataFrame()
_hashrate = pd.DataFrame()
_winrate = pd.DataFrame()
_optwinrate = pd.DataFrame()
_expreward = pd.DataFrame()
sim.sim_DAA_1(prices=prices, exprvs=exprvs, df_opt_w=df_opt_w)
_blocktime['iter_{}'.format(iter)] = sim.block_times
_hashrate['iter_{}'.format(iter)] = sim.hash_rates
_winrate['iter_{}'.format(iter)] = sim.winning_rates
_optwinrate['iter_{}'.format(iter)] = sim.optimal_winning_rates
_expreward['iter_{}'.format(iter)] = sim.expected_rewards
df_DAA_1_blocktime = pd.concat([df_DAA_1_blocktime, _blocktime], axis=1)
df_DAA_1_hashrate = pd.concat([df_DAA_1_hashrate, _hashrate], axis=1)
df_DAA_1_winrate = pd.concat([df_DAA_1_winrate, _winrate], axis=1)
df_DAA_1_optwinrate = pd.concat([df_DAA_1_optwinrate, _optwinrate], axis=1)
df_DAA_1_expreward = pd.concat([df_DAA_1_expreward, _expreward], axis=1)
# DAA-2
_blocktime = pd.DataFrame()
_hashrate = pd.DataFrame()
_winrate = pd.DataFrame()
_optwinrate = pd.DataFrame()
_expreward = pd.DataFrame()
sim.sim_DAA_2(prices=prices, exprvs=exprvs, df_opt_w=df_opt_w)
_blocktime['iter_{}'.format(iter)] = sim.block_times
_hashrate['iter_{}'.format(iter)] = sim.hash_rates
_winrate['iter_{}'.format(iter)] = sim.winning_rates
_optwinrate['iter_{}'.format(iter)] = sim.optimal_winning_rates
_expreward['iter_{}'.format(iter)] = sim.expected_rewards
df_DAA_2_blocktime = pd.concat([df_DAA_2_blocktime, _blocktime], axis=1)
df_DAA_2_hashrate = pd.concat([df_DAA_2_hashrate, _hashrate], axis=1)
df_DAA_2_winrate = pd.concat([df_DAA_2_winrate, _winrate], axis=1)
df_DAA_2_optwinrate = pd.concat([df_DAA_2_optwinrate, _optwinrate], axis=1)
df_DAA_2_expreward = pd.concat([df_DAA_2_expreward, _expreward], axis=1)
df_DAA_1_blocktime.to_csv(dir_sim+'DAA-1_blocktime_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_hashrate.to_csv(dir_sim+'DAA-1_hashrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_winrate.to_csv(dir_sim+'DAA-1_winrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_optwinrate.to_csv(dir_sim+'DAA-1_optwinrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_expreward.to_csv(dir_sim+'DAA-1_expreward_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_blocktime.to_csv(dir_sim+'DAA-2_blocktime_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_hashrate.to_csv(dir_sim+'DAA-2_hashrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_winrate.to_csv(dir_sim+'DAA-2_winrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_optwinrate.to_csv(dir_sim+'DAA-2_optwinrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_expreward.to_csv(dir_sim+'DAA-2_expreward_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
return None
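# Illustrative usage sketch (not part of the original script): how the function
# above might be called.  The simulation directory and the CSV files it expects
# ('sim_prices_ps=0_5000obs.csv', 'sim_exprvs_5000obs.csv', 'opt_w.csv',
# '../data/BTCdata_presim.csv') must already exist; the argument values are examples.
def _demo_generate_simulation_data():
    generate_simulation_data(num_iter=3, price_shock=0, T=2016,
                             dir_sim='/Volumes/Data/research/BDA/simulation/')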
def generate_simulation_data_DAA0(num_iter=3, price_shock=0,
opt_w=pd.DataFrame(), prev_data=pd.DataFrame(),
dir_sim='/Volumes/Data/research/BDA/simulation/'):
'''
Notes
-----
    num_iter is the number of observations.
The price data 'sim_prices_ps={}_5000obs.csv'.format(price_shock) should
be created in advance.
'''
df_exprvs = pd.read_csv(dir_sim+'sim_exprvs_5000obs.csv')
df_price = pd.read_csv(dir_sim+'sim_prices_ps={}_5000obs.csv'\
.format(price_shock))
df_opt_w = pd.read_csv(dir_sim + 'opt_w.csv', index_col=0)
path = '../data/BTCdata_presim.csv'
prev_data = pd.read_csv(path)
    prev_data['time'] = pd.to_datetime(prev_data['time'])
import os
import json
from typing import Union
from scipy.spatial import distance
import numpy as np
import pandas as pd
import bottleneck
from .fileformat import WordVecSpaceFile
from .base import WordVecSpaceBase
np.set_printoptions(precision=4)
check_equal = np.testing.assert_array_almost_equal
# export data directory path for test cases
# $export WORDVECSPACE_DATADIR=/path/to/data/
DATAFILE_ENV_VAR = os.environ.get("WORDVECSPACE_DATADIR", "")
class WordVecSpace(WordVecSpaceBase):
METRIC = "cosine"
DEFAULT_K = 512
def __init__(self, input_dir: str, metric: str = METRIC) -> None:
self._f = WordVecSpaceFile(input_dir, mode="r")
self.input_dir = input_dir
self.metric = metric
self.nvecs = len(self._f)
self.dim = int(self._f.dim)
self.vecs = self._f.vecs
self.wtoi = self._f.wtoi
self.itow = self._f.itow
self.occurs = self._f.occurs
self.mags = self._f.mags
def _make_array(self, shape, dtype):
return np.ndarray(shape, dtype)
def _check_index_or_word(self, item):
if isinstance(item, str):
return self.get_index(item)
return item
def _check_indices_or_words(self, items):
w = items
if len(w) == 0:
return []
if isinstance(w, np.ndarray):
assert w.dtype == np.uint32 and len(w.shape) == 1
if isinstance(w, (list, tuple)):
if isinstance(w[0], str):
return self.get_indices(w)
return w
def _check_vec(self, v, normalized=False):
if isinstance(v, np.ndarray) and len(v.shape) == 2 and v.dtype == np.float32:
if normalized:
m = np.linalg.norm(v)
return v / m
return v
else:
if isinstance(v, (list, tuple)):
return self.get_vectors(v, normalized=normalized)
return self.get_vector(v, normalized=normalized)
def get_manifest(self) -> dict:
manifest_info = open(os.path.join(self.input_dir, "manifest.json"), "r")
manifest_info = json.loads(manifest_info.read())
return manifest_info
def does_word_exist(self, word: str) -> bool:
return word in self.wtoi
def get_index(self, word: str) -> int:
assert isinstance(word, str)
return self.wtoi[word]
def get_indices(self, words: list) -> list:
assert isinstance(words, (tuple, list)) and len(words) != 0
indices = [self.wtoi[w] for w in words]
return indices
def get_word(self, index: int) -> str:
return self.itow[index]
def get_words(self, indices: list) -> list:
return [self.itow[i] for i in indices]
def get_magnitude(self, word_or_index: Union[int, str]) -> np.float32:
index = self._check_index_or_word(word_or_index)
return self.mags[index]
def get_magnitudes(self, words_or_indices: list) -> np.ndarray:
w = self._check_indices_or_words(words_or_indices)
return self.mags.take(w)
def get_occurrence(self, word_or_index: Union[int, str]) -> int:
index = self._check_index_or_word(word_or_index)
return self.occurs[index]
def get_occurrences(self, words_or_indices: list) -> list:
w = self._check_indices_or_words(words_or_indices)
return self.occurs.take(w)
def get_vector(
self, word_or_index: Union[int, str], normalized: bool = False
) -> np.ndarray:
index = self._check_index_or_word(word_or_index)
if normalized:
return self.vecs[index]
return self.vecs[index] * self.mags[index]
def get_vectors(
self, words_or_indices: list, normalized: bool = False
) -> np.ndarray:
w = self._check_indices_or_words(words_or_indices)
if normalized:
return self.vecs.take(w, axis=0)
vecs = self.vecs.take(w, axis=0)
mags = self.mags.take(w)
return np.multiply(vecs.T, mags).T
def get_distance(
self,
word_or_index1: Union[int, str],
word_or_index2: Union[int, str],
metric: str = "cosine",
normalized: bool = True,
) -> float:
w1 = word_or_index1
w2 = word_or_index2
if not metric:
metric = self.metric
if metric in ("cosine", "angular"):
vec1 = self._check_vec(w1, normalized)
vec2 = self._check_vec(w2, normalized)
return 1 - np.dot(vec1, vec2.T)
elif metric == "euclidean":
vec1 = self._check_vec(w1)
vec2 = self._check_vec(w2)
return distance.euclidean(vec1, vec2)
def _check_r_and_c(self, r, c, m):
if not m:
m = self.metric
if not isinstance(r, (tuple, list, np.ndarray)):
r = [r]
if c is not None and len(c):
if not isinstance(c, (tuple, list, np.ndarray)):
c = [c]
return m, r, c
def get_distances(
self,
row_words_or_indices: Union[list, np.ndarray],
col_words_or_indices: Union[list, None, np.ndarray] = None,
metric=None,
normalized: bool = True,
) -> np.ndarray:
r = row_words_or_indices
c = col_words_or_indices
metric, r, c = self._check_r_and_c(r, c, metric)
if metric in ("cosine", "angular"):
row_vectors = self._check_vec(r, normalized)
col_vectors = self.vecs
if c is not None and len(c):
col_vectors = self._check_vec(c, normalized)
if len(r) == 1:
nvecs, dim = col_vectors.shape
vec_out = self._make_array(
(len(col_vectors), len(row_vectors)), dtype=np.float32
)
res = self._perform_sgemv(row_vectors, col_vectors, vec_out, nvecs, dim)
else:
mat_out = self._make_array(
(len(row_vectors), len(col_vectors)), dtype=np.float32
)
res = self._perform_sgemm(row_vectors, col_vectors, mat_out)
if not normalized:
res = np.multiply(res, self.mags)
return res
return 1 - res
elif metric == "euclidean":
row_vectors = self._check_vec(r)
if c:
col_vectors = self._check_vec(c)
else:
col_vectors = self.vecs
return distance.cdist(row_vectors, col_vectors, "euclidean")
def _nearest_sorting(self, d, k, normalized=True):
ner = self._make_array(shape=(len(d), k), dtype=np.uint32)
dist = self._make_array(shape=(len(d), k), dtype=np.float32)
for index, p in enumerate(d):
if normalized:
# FIXME: better variable name for b_sort
b_sort = bottleneck.argpartition(p, k)[:k]
pr_dist = np.take(p, b_sort)
# FIXME: better variable name for a_sorted
a_sorted = np.argsort(pr_dist)
indices = np.take(b_sort, a_sorted)
else:
                d = pd.Series(p)
import io
import os
import pandas as pd
import numpy as np
from datetime import date
from .io import ms_file_to_df
MINT_ROOT = os.path.dirname(__file__)
PEAKLIST_COLUMNS = ['peak_label', 'mz_mean', 'mz_width',
'rt_min', 'rt_max', 'intensity_threshold', 'peaklist']
def example_peaklist():
return pd.read_csv(f'{MINT_ROOT}/../tests/data/example_peaklist.csv')
def example_results():
return pd.read_csv(f'{MINT_ROOT}/../tests/data/example_results.csv')
RESULTS_COLUMNS = ['peak_label', 'peak_area', 'peak_n_datapoints', 'peak_max', 'peak_min', 'peak_median',
'peak_mean', 'peak_int_first', 'peak_int_last', 'peak_delta_int',
'peak_rt_of_max', 'peaklist', 'mz_mean', 'mz_width', 'rt_min', 'rt_max',
'intensity_threshold', 'peak_shape_rt', 'peak_shape_int']
MINT_RESULTS_COLUMNS = ['peak_label', 'ms_file',
'peak_area', 'peak_n_datapoints', 'peak_max', 'peak_min', 'peak_median',
'peak_mean', 'peak_int_first', 'peak_int_last', 'peak_delta_int',
'peak_rt_of_max', 'file_size', 'intensity_sum', 'ms_path', 'peaklist',
'mz_mean', 'mz_width', 'rt_min', 'rt_max', 'intensity_threshold',
'peak_shape_rt', 'peak_shape_int']
def integrate_peaks(ms_data, peaklist):
def base(peak):
slizE = slice_ms1_mzxml(ms_data, **peak)
shape = slizE[['retentionTime', 'm/z array', 'intensity array']]\
.groupby(['retentionTime', 'm/z array']).sum()\
.unstack()\
.sum(axis=1)
if len(shape) == 0:
results = peak.copy()
results.update({'peak_area': 0})
return results
peak_area = np.int64(shape.sum())
peak_med = np.float64(shape[shape != 0].median())
peak_avg = np.float64(shape[shape != 0].mean())
peak_max = np.float64(shape.max())
peak_min = np.float64(shape.min())
results = {}
float_list_to_comma_sep_str = lambda x: ','.join( [ str(np.round(i, 4)) for i in x ] )
results['peak_shape_rt'] = float_list_to_comma_sep_str( shape.index )
results['peak_shape_int'] = float_list_to_comma_sep_str ( shape.values )
results['peak_area'] = peak_area
results['peak_max'] = peak_max
results['peak_min'] = peak_min
results['peak_median'] = peak_med
results['peak_mean'] = peak_avg
results['peak_int_first'] = shape.values[0]
results['peak_int_last'] = shape.values[-1]
results['peak_delta_int'] = results['peak_int_last'] - results['peak_int_first']
results['peak_rt_of_max'] = shape[shape == peak_max].index
results['peak_n_datapoints'] = len(shape)
if len(results['peak_rt_of_max']) > 0:
results['peak_rt_of_max'] = np.mean(results['peak_rt_of_max'])
else:
results['peak_rt_of_max'] = np.nan
results.update(peak)
return results
base = np.vectorize(base)
results = base(to_peaks(peaklist))
results = pd.merge(pd.DataFrame(list(results)), peaklist[['peaklist', 'peak_label']], on=['peak_label'])
# Make sure all columns are present
for col in RESULTS_COLUMNS:
        if col not in results.keys():
results[col] = np.NaN
return results[RESULTS_COLUMNS]
def integrate_peak(ms_data, mz_mean, mz_width, rt_min, rt_max,
intensity_threshold, peak_label):
peaklist = pd.DataFrame([dict(mz_mean=mz_mean,
mz_width=mz_width,
rt_min=rt_min,
rt_max=rt_max,
intensity_threshold=intensity_threshold,
peak_label=peak_label,
peaklist='single_peak')], index=[0])
result = integrate_peaks(ms_data, peaklist)
return result
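# Sketch only (not in the original module): integrating one hypothetical peak
# straight from a raw MS file.  The file name and peak window are invented for
# illustration; ms_file_to_df is the loader imported from .io above.
def _demo_integrate_peak():
    ms_data = ms_file_to_df('example.mzXML')
    result = integrate_peak(ms_data, mz_mean=151.04, mz_width=10,
                            rt_min=4.1, rt_max=4.8,
                            intensity_threshold=0, peak_label='demo-peak')
    return result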
def read_peaklists(filenames):
'''
Extracts peak data from csv files that contain peak definitions.
CSV files must contain columns:
- 'peak_label': str, unique identifier
- 'peakMz': float, center of mass to be extracted in [Da]
- 'peakMzWidth[ppm]': float, with of mass window in [ppm]
- 'rtmin': float, minimum retention time in [min]
- 'rtmax': float, maximum retention time in [min]
-----
Args:
- filenames: str or PosixPath or list of such with path to csv-file(s)
Returns:
pandas.DataFrame in peaklist format
'''
NEW_LABELS = {'peakLabel': 'peak_label',
'peakMz': 'mz_mean',
'peakMzWidth[ppm]': 'mz_width',
'rtmin': 'rt_min',
'rtmax': 'rt_max'}
if isinstance(filenames, str):
filenames = [filenames]
peaklist = []
for file in filenames:
if str(file).endswith('.csv'):
df = pd.read_csv(file, dtype={'peakLabel': str})\
.rename(columns=NEW_LABELS)
elif str(file).endswith('.xlsx'):
df = pd.read_excel(file, dtype={'peakLabel': str})\
.rename(columns=NEW_LABELS)
df['peaklist'] = os.path.basename(file)
if 'intensity_threshold' not in df.columns:
df['intensity_threshold'] = 0
df['peak_label'] = df['peak_label'].astype(str)
peaklist.append(df[PEAKLIST_COLUMNS])
    peaklist = pd.concat(peaklist)
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas as pd
import pandas.util.testing as tm
import pytest
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.discrete.discrete_model import Logit
# FIXME: do not leave commented-out, enable or move/remove
# class TestDates(object):
# @classmethod
# def setup_class(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
# HACK: because numpy master after NA stuff assert_equal fails on
# pandas indices
# FIXME: see if this can be de-hacked
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays2dEndog, cls).setup_class()
cls.endog = np.random.random((10, 1))
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays1dExog, cls).setup_class()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:, None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
tm.assert_series_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
tm.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
tm.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestDataFramesWithMultiIndex(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
mi = pd.MultiIndex.from_product([['x'], ['1', '2']])
exog = pd.DataFrame(np.random.random((10, 2)), columns=mi)
exog_flattened_idx = pd.Index(['const', 'x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input, index=exog_flattened_idx)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog_flattened_idx,
columns=exog_flattened_idx)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
class TestLists(TestArrays):
@classmethod
def setup_class(cls):
super(TestLists, cls).setup_class()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestRecarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestStructarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestStructarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestListDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10).tolist()
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
        tm.assert_frame_equal(self.data.orig_endog, self.endog)
import pickle
import pandas as pd
import numpy as np
from random import sample
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
def sub_sampling(table, sampling_depth = 50):
'''
subsamping a table to get same sampling depth for all samples.
'''
sub_sampled_table = np.zeros(table.shape)
for idx in range(table.shape[0]):
pool = []
for ISM_idx, count in enumerate(table[idx].tolist()):
pool.extend([ISM_idx for i in range(int(count))])
sub_sampled_dict = Counter(sample(pool,sampling_depth))
for ISM_idx in sub_sampled_dict:
sub_sampled_table[idx, ISM_idx] = sub_sampled_dict[ISM_idx]
return sub_sampled_table
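# Minimal sketch (not part of the original module): rarefying a tiny two-sample
# count table to a common depth.  The counts are made up for illustration.
def _demo_sub_sampling():
    toy_table = np.array([[30., 15., 5.],
                          [80., 10., 10.]])
    rarefied = sub_sampling(toy_table, sampling_depth=20)
    # after sub-sampling every row sums to the chosen depth
    assert rarefied.sum(axis=1).tolist() == [20.0, 20.0]
    return rarefied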
def bray_curtis_distance(table, sample1_id, sample2_id):
'''
compute Bray Curtis distance between two samples.
'''
numerator = 0
denominator = 0
sample1_counts = table[sample1_id]
sample2_counts = table[sample2_id]
for sample1_count, sample2_count in zip(sample1_counts, sample2_counts):
numerator += abs(sample1_count - sample2_count)
denominator += sample1_count + sample2_count
return numerator / denominator
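# Illustrative sketch: Bray-Curtis distance between two rows of a count table.
# The toy table is fabricated; identical rows give 0, fully disjoint rows give 1.
def _demo_bray_curtis_distance():
    toy_table = np.array([[10., 5., 5.],
                          [5., 10., 5.]])
    d_01 = bray_curtis_distance(toy_table, 0, 1)   # 10 / 40 = 0.25
    d_00 = bray_curtis_distance(toy_table, 0, 0)   # 0.0
    return d_01, d_00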
def build_ISM_abundance_table(data_df, sampling_depth = 150):
'''
convert ISM dataframe to ISM abundance table.
'''
NT_SET = set(['A', 'C', 'G', 'T', '-'])
tmp = data_df[['continent', 'country/region']]
country_to_continent = {}
for i in range(tmp.shape[0]):
continent, country = tmp.iloc[i]['continent'], tmp.iloc[i]['country/region']
if country in country_to_continent:
pass
else:
country_to_continent[country] = continent
ISM_to_idx, idx_to_ISM, region_to_idx, idx_to_region = {}, {}, {}, {}
region_list = []
ISM_set = set([])
for region, count in data_df['country/region'].value_counts().items():
if count >= sampling_depth:
region_list.append(region)
tmp = data_df[data_df['country/region'] == region]
ISM_set.update(tmp['ISM'].unique())
country_to_continent = {country: country_to_continent[country] for country in country_to_continent if country in region_list}
continent_list = sorted(set(country_to_continent.values()))
color_list = ['red', 'orange', 'green', 'blue', 'brown', 'black', 'pink', 'mediumpurple','gold', 'lightskyblue', ]
continent_to_color = {}
for idx, item in enumerate(continent_list):
continent_to_color[item] = color_list[idx]
n_row, n_ISM = len(region_list), len(ISM_set)
raw_table = np.zeros((n_row, n_ISM))
for idx, ISM in enumerate(ISM_set):
ISM_to_idx[ISM] = idx
idx_to_ISM[idx] = ISM
for idx, region in enumerate(region_list):
region_to_idx[region] = idx
idx_to_region[idx] = region
for region_idx in range(n_row):
region = idx_to_region[region_idx]
tmp = data_df[data_df['country/region'] == region]
for ISM, count in tmp['ISM'].value_counts().items():
ISM_idx = ISM_to_idx[ISM]
raw_table[region_idx, ISM_idx] += count
return raw_table, n_row, n_ISM, country_to_continent, continent_to_color, idx_to_region
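# Sketch of the expected input (fabricated data, illustration only): one row per
# sequence with at least 'continent', 'country/region' and 'ISM' columns.  Real
# runs read these from the corrected ISM csv, as region_pca_plot does below.
def _demo_build_ISM_abundance_table():
    toy = pd.DataFrame({
        'continent': ['Asia'] * 3 + ['Europe'] * 3,
        'country/region': ['region_A'] * 3 + ['region_B'] * 3,
        'ISM': ['AAA', 'AAA', 'CCC', 'CCC', 'CCC', 'TTT'],
    })
    table, n_row, n_ISM, c_to_c, c_to_color, idx_to_region = \
        build_ISM_abundance_table(toy, sampling_depth=3)
    return table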
def region_pca_plot(INPUT_FOLDER, OUTPUT_FOLDER, sampling_depth=150):
'''
pca plot of ISM abundance table
'''
ISM_df = pd.read_csv('{}/ISM_df_with_correction.csv'.format(INPUT_FOLDER))
    ISM_df['date'] = pd.to_datetime(ISM_df['date'])
# Diffusion Maps Framework implementation as part of MSc Data Science Project of student
# <NAME> at University of Southampton, MSc Data Science course
# Script 9: Network example taken on 20/08/2016 from
# https://networkx.readthedocs.io/en/stable/examples/drawing/weighted_graph.html
import openpyxl
import numpy as np
import pandas as pd
from math import sqrt
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.decomposition import PCA
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from diffusion_framework import main as diffusion_framework
matplotlib.style.use('ggplot')
# Choose set of normalised data
# datasource = './data/normalised/sqrtData.xlsx'
# datasource = './data/normalised/NormalisationData.xlsx'
datasource = './data/normalised/NormalisationByRowData.xlsx'
# datasource = './data/normalised/MinMaxScalerData.xlsx'
# datasource = './data/normalised/MinMaxScalerFeatureData.xlsx'
# datasource = './data/pcaData.xlsx'
dtsource = './data/datetimes.xlsx'
def main():
xlData = pd.ExcelFile(datasource)
sheetNames = xlData.sheet_names
    dtExcel = pd.ExcelFile(dtsource)
from flask import Flask, request, render_template, Response
from flask import make_response, jsonify
import sys
import os
import requests
import json
import threading
import time
import pandas as pd
import tempfile
import datetime
from collections import defaultdict
import namegenerator
sys.path.append(os.path.abspath("./"))
from apollo.Scraper.config import (
USER_AGENT,
YOUTUBE_VIDEO_URL,
)
if not(os.path.isdir(os.path.join('./', 'downloads'))):
os.mkdir(os.path.join('./', 'downloads'))
from apollo.Scraper.LinkParser import extract_id
from apollo.inference.inference import inference_v2, load_model
from apollo.Scraper.download_comments import download_comments
app = Flask(__name__)
app.secret_key = os.urandom(24)
LOAD_MODEL_THREAD = None
chart_data = [0, 0]
log_data = ''
DATA_STORE = defaultdict(list)
COMMENTS_STORE = []
@app.after_request
def add_header(response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
response.headers["X-UA-Compatible"] = "IE=Edge,chrome=1"
response.headers["Cache-Control"] = "public, max-age=0"
return response
def scrapper_v2(youtube_id, sensitivity, limit):
'''
Code modified from : https://github.com/egbertbouman/youtube-comment-downloader
:param youtube_id: ID of Youtube Video Link
:param sensitivity: Sensitivity tolerance level (To be used as threshold during inference)
:param limit: Number of comments to be scraped
:return: CSV file of output comments
'''
try:
# if LOAD_MODEL_THREAD is not None:
LOAD_MODEL_THREAD.join()
global chart_data
global log_data
global DATA_STORE
filename = '{}_{}_{}.csv'.format(youtube_id, sensitivity, limit)
chart_data = [0, 0]
log_data = ''
        df = pd.DataFrame(columns=['id', 'comment', 'score', 'sensitivity'])
# -*- coding: utf-8 -*-
"""
@author:
<NAME>
Aarhus University
TECOLOGY.xyz
Production of collages of crops of flower and insect detections to be used on Zooniverse.
This script takes raw images and detection info (bounding box coordinates) for flowers and insects as input.
It crops detections and produces collages with four crops in each. It also produces a manifest to be used on the Zooniverse platform.
"""
import cv2
import pandas as pd
import csv
import os
from PIL import Image, ImageDraw
from math import ceil
### SET THE RELEVANT PATHS ###
camyear = "2018_NARS-02"
flower_detections = r"2018_NARS-02_submit_boxes_NewFormat_NoZeroes_Day.csv"
known_visitors = r"Dryas-12_All.csv"
save_root = r"/mnt/archive/Workspace_Hjalte/MaskRCNN/2020_01_22_VisitorDetectionOnFlowers_DetectionPhenologyTestSeries/collages_Monster_Final/"
flowers_and_visitors = camyear + "_" + "flowers_and_visitors_Temp.csv"
### USER SETTINGS ###
upscale_factor = 1 # Use this if the coordinates for the detections need to be upscaled
crops_per_collage = 4 # Number of flower crops in each collage
known_visitor_percentage = 0.5 # So far, we have used 2% known visitors. However, as we are presenting four crops at a time, we can divide this by 4.
collages_per_folder = 50000 # Number of collages per folder
### Mixing in known insects ###
"""
To mix in known insects, we will
-open the flower detections
-count the number of detections
-calculate number of known insect needed
-open insect annotations file
-sample the number needed
-add them to the flower detections
-mix the flower detections, so the insects appear randomly
-save the new flower detections file containing the known insects.
-Now the rest of the script can be run
"""
fl_detections = pd.read_csv(flower_detections, header = None) # Read the file containing the flower detections
fl_detections.columns=['filename', 'x_min', 'y_min', 'x_max', 'y_max'] # Name the columns
fl_detections['known_insect'] = str(0)
fl_detections['x_min'] = fl_detections['x_min']*upscale_factor # We might as well upscale the detection coordinates now we have the file open
fl_detections['y_min'] = fl_detections['y_min']*upscale_factor
fl_detections['x_max'] = fl_detections['x_max']*upscale_factor
fl_detections['y_max'] = fl_detections['y_max']*upscale_factor
fl_detections['x_min'] = fl_detections['x_min'].astype(int)
fl_detections['y_min'] = fl_detections['y_min'].astype(int)
fl_detections['x_max'] = fl_detections['x_max'].astype(int)
fl_detections['y_max'] = fl_detections['y_max'].astype(int)
det_count = fl_detections.shape[0] # Count the number of detections
visitors_needed = ceil(det_count/100 * known_visitor_percentage) # We calculate the number of known insects we need to add to the detection dataset. ceil rounds up to the next integer
if visitors_needed + det_count % crops_per_collage != 0: # If the combined number of images is not divisible by 4 (crops_per_collage), we will sample extra insects until it is. This ensures that we do not leave out any crops.
visitors_needed = visitors_needed + (crops_per_collage -((visitors_needed + det_count) % crops_per_collage))
known_visitors = pd.read_csv(known_visitors, header = None)
known_visitors.columns=['filename', 'x_min', 'y_min', 'x_max', 'y_max']
#known_visitors = known_visitors[['filename', 'x_min', 'y_min', 'x_max', 'y_max']]
known_visitors['known_insect'] = str(1)
sampled_visitors = known_visitors.sample(n = visitors_needed, replace = True) # Sample the insect visitor crops
fl_detections_visitors = pd.concat([fl_detections, sampled_visitors], axis=0) # Add them to the detection dataframe
fl_detections_visitors = fl_detections_visitors.sample(frac=1) # Mix the dataframe so the insects occur at random locations
fl_detections_visitors.to_csv(flowers_and_visitors, header = None, index = False) # Write the dataframe to csv
### PRINT SOME STUFF ###
number_flowers = len(fl_detections)
number_visitors_sampled = len(sampled_visitors)
number_collages = (number_flowers + number_visitors_sampled)//crops_per_collage
number_folders = (number_collages//collages_per_folder) + 1
print(f"Running on: {camyear}")
print(f"Number of flowers detected: {number_flowers}")
print(f"Number of visitors sampled: {number_visitors_sampled}")
print(f"Number of collages that will be produced: {number_collages}")
print(f"Number of folders that will hold the collages: {number_folders}")
###
manifest = []
def write_manifest(collage_name, list_of_crops, folder_name): # Write a manifest for the Zooniverse platform
TL_Path = list_of_crops[0][0] # Get the paths for the image for each crop in a collage
TR_Path = list_of_crops[1][0]
BL_Path = list_of_crops[2][0]
BR_Path = list_of_crops[3][0]
TL_Coordinates = str(list_of_crops[0][1]) + "_" + str(list_of_crops[0][2]) + "_" + str(list_of_crops[0][3]) + "_" + str(list_of_crops[0][4]) # Get the coordinates for the detection in each image
TR_Coordinates = str(list_of_crops[1][1]) + "_" + str(list_of_crops[1][2]) + "_" + str(list_of_crops[1][3]) + "_" + str(list_of_crops[1][4])
BL_Coordinates = str(list_of_crops[2][1]) + "_" + str(list_of_crops[2][2]) + "_" + str(list_of_crops[2][3]) + "_" + str(list_of_crops[2][4])
BR_Coordinates = str(list_of_crops[3][1]) + "_" + str(list_of_crops[3][2]) + "_" + str(list_of_crops[3][3]) + "_" + str(list_of_crops[3][4])
TL_KnownInsect = list_of_crops[0][5] # Binary score of whether the crop is a known insect. 0: no, 1: yes.
TR_KnownInsect = list_of_crops[1][5]
BL_KnownInsect = list_of_crops[2][5]
BR_KnownInsect = list_of_crops[3][5]
    row = [collage_name, TL_Path, TL_Coordinates, TL_KnownInsect, TR_Path, TR_Coordinates, TR_KnownInsect, BL_Path, BL_Coordinates, BL_KnownInsect, BR_Path, BR_Coordinates, BR_KnownInsect, folder_name] # Manifest row for this collage
manifest.append(row)
def cut_crops(list_of_crops): # Function for cutting out the detection crops from the full images based on the bounding box coordinates
crops = []
for c in list_of_crops:
image = cv2.imread(c[0])
c_xmin = c[1]
c_ymin = c[2]
c_xmax = c[3]
c_ymax = c[4]
c_width = c_xmax - c_xmin
c_height = c_ymax - c_ymin
crop = image[c_ymin:c_ymin + c_height, c_xmin:c_xmin + c_width]
crops.append(crop)
return crops
def create_background(crops): # Create a background image in which the crops will be pasted to create a collage of four crops
background_width = (max([crops[0].shape[0], crops[1].shape[0], crops[2].shape[0], crops[3].shape[0]]) * 2) + 50 # The width of the background image is 2*widest crop + 50 pixels
background_height = (max([crops[0].shape[1], crops[1].shape[1], crops[2].shape[1], crops[3].shape[1]]) * 2) + 50 # The height of the background image is 2*tallest crop + 50 pixels
background_image = Image.new("RGB", (background_height, background_width), (255,255,255)) # Create the background image
# We'll draw a frame around the image and a cross in the middle'
draw = ImageDraw.Draw(background_image)
draw.line((0,int(background_width/2), background_height, int(background_width/2)), fill=(0,0,0, 255), width=2) # draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)]
draw.line(((int(background_height/2), 0, int(background_height/2), background_width)), fill=(0,0,0, 255), width=2)
draw.line(((0, 0, int(background_height), 0)), fill=(0,0,0, 255), width=2)
draw.line(((1, int(background_width), 1, 0)), fill=(0,0,0, 255), width=2)
draw.line(((int(background_height), int(background_width)-1, 0, int(background_width)-1)), fill=(0,0,0, 255), width=2)
draw.line(((int(background_height)-2, 0, int(background_height)-2, int(background_width))), fill=(0,0,0, 255), width=2)
return background_image # And we are ready to return our background image
def paste_crops(background_image, crops): # Function for pasting crops in the background image
index = 0
background_height, background_width = background_image.size
for c in crops:
c = cv2.cvtColor(c, cv2.COLOR_BGR2RGB)
c = Image.fromarray(c)
h, w = c.size
if index == 0:
x = int(background_width/4 - (w/2))
y = int(background_height/4 -(h/2))
elif index == 1:
x = int(background_width/4 - (w/2))
y = int((background_height/(4/3)) - (h/2))
elif index == 2:
x = int((background_width/(4/3)) - (w/2))
y = int(background_height/4 -(h/2))
else:
x = int((background_width/(4/3)) - (w/2))
y = int((background_height/(4/3)) - (h/2))
background_image.paste(c, (y, x, y + h, x + w))
collage = background_image
index += 1
return collage
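# Sketch of how one collage is assembled from four detection rows
# (path, x_min, y_min, x_max, y_max, known_insect) -- the CSV loop below does
# exactly this; the helper here is illustration only.
def _demo_one_collage(four_detection_rows, out_path):
    crops = cut_crops(four_detection_rows)
    background = create_background(crops)
    collage = paste_crops(background, crops)
    collage.save(out_path)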
######## PRODUCE CROP COLLAGES ######
collage_counter = 0 # Keep track of the number of collages produced
folder_counter = 0 # Keep track of the folder number
split_save_to = save_root + "/" + camyear + "_" + str(folder_counter)
os.makedirs(split_save_to)
with open(flowers_and_visitors, 'r') as csvfile:
detections = csv.reader(csvfile, delimiter=',')
lines = []
collage_index = 0
for line in detections:
line = [int(int(i)*upscale_factor) if i.isdigit() else i.replace('O:/Tech_TTH-AID','/mnt/bitcue_mountpoint') for i in line] # Convert the coordinates to digits in the line
lines.append(line)
if len(lines) >= crops_per_collage:
crops = cut_crops(lines)
background_image = create_background(crops)
collage = paste_crops(background_image, crops)
collage_name = camyear + "_" + str(collage_index).zfill(6) + ".jpg"
collage_path = split_save_to + "/" + collage_name
folder_name = camyear + "_" + str(folder_counter)
collage.save(collage_path)
write_manifest(collage_name, lines, folder_name)
collage_counter += 1
collage_index += 1
if collage_counter % 2500 == 0: # Print message to let the user know that the script is still running
print("2500 mark. Still going strong...")
if collage_counter == collages_per_folder:
print(str(collages_per_folder), " mark. Still going. Starting a new folder.")
                manifest = pd.DataFrame(manifest, columns = ["ID", "!TL_Path", "!TL_Coordinates", "!TL_KnownInsect","!TR_Path","!TR_Coordinates", "!TR_KnownInsect","!BL_Path","!BL_Coordinates", "!BL_KnownInsect", "!BR_Path", "!BR_Coordinates", "!BR_KnownInsect", "Folder_Name"])
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
@pytest.fixture
def idx_expected(self):
idx = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B").tz_localize(
"US/Pacific"
)
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
assert expected.dtype == idx.dtype
return idx, expected
def test_to_series_keep_tz_deprecated_true(self, idx_expected):
# convert to series while keeping the timezone
idx, expected = idx_expected
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
assert msg in str(m[0].message)
tm.assert_series_equal(result, expected)
def test_to_series_keep_tz_deprecated_false(self, idx_expected):
idx, expected = idx_expected
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
def test_setitem_dt64series(self, idx_expected):
# convert to utc
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
def test_constructor_from_tzaware_datetimeindex(self, idx_expected):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx, expected = idx_expected
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
def test_set_axis_setattr_index(self):
# GH 6785
# set the index manually
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
def test_dti_set_index_reindex_with_tz(self):
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
assert is_object_dtype(df["C"])
assert is_object_dtype(df["E"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_set_reset_index(self):
df = DataFrame({"A": range(10)})
        s = cut(df.A, 5)
#!/usr/bin/env python
# coding: utf-8
import streamlit as st
import streamlit.components.v1 as components
import matplotlib.pyplot as plt
import kayak
from PIL import Image
import numpy as np
filename_airport = './assets/airports.csv'
filename_aircraft = './assets/aircraft.csv'
output = './assets/output.xlsx'
blank = Image.open('./assets/blank.jpeg')
greenest = Image.open('./assets/planet-earth.png')
cheapest = Image.open('./assets/decrease.png')
shortest = Image.open('./assets/chronometer.png')
bg = Image.open('./assets/background.jpg')
plane = Image.open('./assets/plane.png')
import pandas as pd
df_airport = pd.read_csv(filename_airport)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc
import random
import warnings
warnings.filterwarnings("ignore")
def main():
tr_x, tr_y, ts_x, ts_y = preprocess()
assert(tr_x.shape == (2300,57))
assert(tr_y.shape == (2300, 1))
assert(ts_x.shape == (2301, 57))
assert(ts_y.shape == (2301, 1))
print("---------------------------------")
print("-------EXP 1---------------------")
print("---------------------------------")
# Train a SVM model
clf = svm.SVC(kernel='linear',probability=True)
clf.fit(tr_x, tr_y)
prediction = clf.predict(ts_x)
probs = clf.predict_proba(ts_x)
# Accuracy, precision, recall, and ROC curve
acc = accuracy_score(ts_y, prediction) * 100
precision = precision_score(ts_y, prediction) * 100
recall = recall_score(ts_y, prediction) * 100
probs = probs[:,1]
fpr, tpr, thresh = roc_curve(ts_y, probs)
roc_auc = auc(fpr, tpr)
print("Accuracy: {} %".format(acc))
print("Precision: {} %".format(precision))
print("Recall: {} %".format(recall))
# Plot ROC Curve and save plot
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('roc_exp1.png')
plt.clf()
print("---------------------------------")
print("-------EXP 2---------------------")
print("---------------------------------")
# Compute weight vector w
w = np.dot(clf.dual_coef_, clf.support_vectors_)
w = w.reshape((57,))
w = np.absolute(w)
svm_m = svm.SVC(kernel='linear')
acc_data = []
# Get new features with highest wight vector value
for m in range(2, 58):
new_tr_x, new_ts_x = select_features(w, m, tr_x, ts_x)
svm_m.fit(new_tr_x, tr_y)
predict_m = svm_m.predict(new_ts_x)
acc = accuracy_score(ts_y, predict_m) * 100
acc_data.append(acc)
print("M: {}, \t Accuracy: {}".format(m, acc))
# Plot Accuracy
plt.title('Number of features vs Accuracy')
plt.plot(range(2,58), acc_data)
plt.ylabel('Accuracy %')
plt.xlim([0, 57])
plt.ylim([30, 100])
plt.xlabel('number of features')
plt.savefig('acc_exp2.png')
plt.clf()
print("---------------------------------")
print("-------EXP 3---------------------")
print("---------------------------------")
svm_random = svm.SVC(kernel='linear')
acc_data_random = []
for m in range(2, 58):
random_tr_x, random_ts_x = select_random_features(m, tr_x, ts_x)
svm_random.fit(random_tr_x, tr_y)
predict_random = svm_random.predict(random_ts_x)
acc_random = accuracy_score(ts_y, predict_random) * 100
acc_data_random.append(acc_random)
print("M: {}, \t Accuracy: {}".format(m, acc_random))
# Plot Accuracy
plt.title('Number of features (Random) vs Accuracy')
plt.plot(range(2, 58), acc_data_random)
plt.ylabel('Accuracy %')
plt.xlim([0, 57])
plt.ylim([30, 100])
plt.xlabel('number of features')
plt.savefig('rand_acc_exp3.png')
plt.clf()
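# select_features / select_random_features are called above but defined elsewhere
# in this script; the helper below is only a sketch of the intended behaviour
# (keep the m columns with the largest weight magnitude), not the original code.
def _select_features_sketch(w, m, tr_x, ts_x):
    top_m = np.argsort(w)[::-1][:m]          # indices of the m largest |w|
    return np.asarray(tr_x)[:, top_m], np.asarray(ts_x)[:, top_m]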
def preprocess():
data = pd.read_csv('spambase.data', header=None, sep=',', engine='c', na_filter= False, low_memory=False)
labels = data.iloc[:, 57]
# 1 1813
# 0 2788
# Split train test 50-50. Make sure # of classes are balanced in each
tr_x = pd.concat([data.head(906), data.tail(1394)])
tr_y = pd.concat([labels.head(906), labels.tail(1394)])
    ts_x = pd.concat([data[906:1813], data[1813:3207]])
import ast
import csv
import sys, os
from pandas import DataFrame, to_datetime
from PyQt5 import uic
from PyQt5.QtChart import QChartView, QValueAxis, QBarCategoryAxis, QBarSet, QBarSeries, QChart
from PyQt5.QtCore import QFile, QTextStream, Qt
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QApplication, QComboBox, QHeaderView, QLineEdit, QMainWindow, QPushButton, QTableWidget, QTableView,QTableWidgetItem, QMessageBox, QFileDialog
from client.charts import Piechart, Barchart
from client.datahandler import DataHandler
from client.logs import PandasModel
from modules.Processor import ProcessData
from modules.Parser import export_to_file
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
#Load the UI Page
uic.loadUi('client/main.ui', self)
# upload
self.actionUpload.triggered.connect(self.upload)
# Exit
self.actionExit.triggered.connect(self.exit)
self.df = None
self.searchdata = None
# Export Protocols and IP
self.actionSummary.triggered.connect(self.Summary)
# Exporting table details
self.actionTableDetails.triggered.connect(self.TableDetails)
def popup(self):
'''
Popup Dialog to request file to be uploaded
'''
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("New File")
msgBox.setText("Upload New File to Analyze.")
msgBox.setStandardButtons(QMessageBox.Open)
msgBox.buttonClicked.connect(self.upload)
msgBox.exec()
def upload(self):
'''
Uploads file to application
'''
fileName, _ = QFileDialog.getOpenFileName(None, "Select File", "", "Log Files (*.csv *.tsv *.json *.xls *.xlsx)")
        if fileName != '':
proc = ProcessData(fileName)
proc.parse()
data = proc.analyse()
self.df = DataFrame.from_dict(data)
self.display()
else:
self.showMessageBox("File Not Uploaded", "File Not Uploaded Successfully")
def display(self):
'''
Calls the data processor DataHandler and displays the result
'''
if self.df is not None:
self.data = DataHandler(self.df)
QApplication.processEvents()
# self.summary = self.data.getSummary()
self.chartseries = self.data.getSeries()
# Displays Charts and Tables
self.displaychart("attackchart", self.chartseries, "Attack Types")
self.displaytable("datatable", self.df)
self.displaytop("topip", self.data.getTopIPs(), ['IP Addresses', 'Count'])
self.displaytop("topports", self.data.getTopProtocols(), ['Protocol : Port', 'Count'])
QApplication.processEvents()
# Search Fields and Buttons
self.isatksearch = self.findChild(QComboBox, "isAtk")
self.ipsearch = self.findChild(QLineEdit, "ipaddr")
self.protocolsearch = self.findChild(QLineEdit, "protocol")
self.portsearch = self.findChild(QLineEdit, "port")
self.atksearch = self.findChild(QLineEdit, "atk")
self.timesearch = self.findChild(QLineEdit, "time")
self.searchbtn = self.findChild(QPushButton, "searchbtn")
self.searchbtn.clicked.connect(self.search)
self.clearbtn = self.findChild(QPushButton, "clearbtn")
self.clearbtn.clicked.connect(self.clear)
QApplication.processEvents()
self.bargraph()
QApplication.processEvents()
def bargraph(self):
'''
Processes and Creates Bar Graph.
'''
self.barchart = self.findChild(QChartView, "attackgraph")
bardata = self.data.getBar()
chartobj = Barchart(bardata)
chartseries = chartobj.getSeries()
# create QChart object and add data
chart = QChart()
chart.addSeries(chartseries)
chart.setTitle("Attacks Over the Past 12 Months")
chart.setAnimationOptions(QChart.SeriesAnimations)
axisX = QBarCategoryAxis()
axisX.append(chartobj.getKeys())
chart.addAxis(axisX, Qt.AlignBottom)
axisY = QValueAxis()
axisY.setRange(0, chartobj.getMax())
chart.addAxis(axisY, Qt.AlignLeft)
chart.legend().setVisible(False)
self.barchart.setChart(chart)
def clear(self):
'''
Clears Search Form
'''
self.isatksearch.setCurrentIndex(0)
self.ipsearch.clear()
self.protocolsearch.clear()
self.portsearch.clear()
self.atksearch.clear()
self.timesearch.clear()
self.pdmdl.clear()
self.searchdata = None
self.logtable.setModel(self.pdmdl)
def displaychart(self, widgetname, chartseries, title):
'''
Displays PieChart
------------------
widgetname : str of widget to call in .ui file
chartseries: PyQT Series to be displayed on chart
title: str of title to be header of chart
'''
self.piechart = self.findChild(QChartView, widgetname)
chartdata = Piechart(chartseries, title).create()
self.piechart.setChart(chartdata)
self.piechart.setRenderHint(QPainter.Antialiasing)
def displaytop(self, widgetname, data, header):
'''
Displays Top IP/Protocols Table
Parameters
------------------
widgetname : str of widget to call in .ui file
data: dict of top ip/protocol data to display
title: str of title to be header of chart
'''
table = self.findChild(QTableWidget, widgetname)
table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
table.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
table.setColumnCount(2)
table.setRowCount(5)
table.setHorizontalHeaderLabels(header)
index = 0
for k,v in data.items():
table.setItem(int(index),0, QTableWidgetItem(k))
table.setItem(int(index),1, QTableWidgetItem(str(v)))
index += 1
def displaytable(self, widgetname, data):
'''
Displays Log Table
Parameters
------------------
widgetname: str of widget to call in .ui file
data: Pandas Dataframe
'''
self.logtable = self.findChild(QTableView, widgetname)
self.logtable.setSortingEnabled(True)
self.pdmdl = PandasModel(data)
self.logtable.setModel(self.pdmdl)
self.logtable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.logtable.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)
def search(self):
'''
Checks Search Form to be sent to table
'''
# get searchquery as dictionary
searchquery = {'IsAtk': self.isatksearch.currentText(), 'IP': self.ipsearch.text(), 'Protocol': self.protocolsearch.text(), 'Port': self.portsearch.text(), 'Atk': self.atksearch.text(), 'Time': self.timesearch.text()}
# check if search query is not empty
searchquery = {k: v for k, v in searchquery.items() if v != ''}
atk = {'Yes': 1, 'No':0}
if searchquery.get('IsAtk', None) == '-':
del searchquery['IsAtk']
        elif searchquery.get('IsAtk', None) is not None:
searchquery['IsAtk'] = atk[searchquery['IsAtk']]
# check if the searchquery is empty
if bool(searchquery) is True:
self.searchdata = self.pdmdl.search(searchquery)
if self.searchdata is not None:
self.logtable.setModel(PandasModel(self.searchdata, search=True))
else:
self.clear()
else:
self.clear()
def Summary(self):
'''
Exports summary
'''
protocol = self.data.getTopProtocols()
ip = self.data.getTopIPs()
fileName = QFileDialog.getSaveFileName(self, "Save File", "", "Log Files (*.csv *.tsv *.json *.xls *.xlsx)")
if fileName[0]:
export_data = [x + y for x, y in zip(protocol.items(), ip.items())]
export_dataframe = ['Protocol & Ports','Counts','IP Address','Counts']
            export_dataframe = DataFrame(export_data, columns=export_dataframe)
from copy import deepcopy
import os
import pandas as pd
from pandas.util.testing import assert_frame_equal
import pytest
import cdpybio as cpb
FL = [os.path.join(cpb._root, 'tests', 'express', 'results.{}.xprs'.format(x))
for x in ['a','b','c']]
TG = os.path.join(cpb._root, 'tests', 'express', 'tg.tsv')
TDF = pd.DataFrame([[ 68., 44., 50.],
[ 98., 33., 25.],
[ 82., 27., 24.]],
index=['TA', 'TB', 'TC'],
columns=FL)
TDF.index.name = 'transcript'
GDF = pd.DataFrame([[ 166., 77., 75.],
[ 82., 27., 24.]],
index=['GA', 'GB'],
columns=FL)
GDF.index.name = 'gene'
class TestCombineExpressOutput:
def test_normal(self):
df = cpb.express.combine_express_output(FL)[0]
assert_frame_equal(df, TDF, check_names = True)
def test_none(self):
assert cpb.express.combine_express_output(FL)[1] is None
def test_est_counts(self):
df = pd.DataFrame([[ 25., 54., 81.],
[ 34., 78., 69.],
[ 71., 88., 35.]],
index=TDF.index,
columns=TDF.columns)
df.index.name = 'transcript'
df2 = cpb.express.combine_express_output(FL, column='est_counts')[0]
assert_frame_equal(df, df2)
df = pd.DataFrame([[ 59., 132., 150.],
[ 71., 88., 35.]],
index=GDF.index,
columns=GDF.columns)
df.index.name = 'gene'
df2 = cpb.express.combine_express_output(
FL, column='est_counts', tg=TG)[1]
assert_frame_equal(df, df2)
def test_gene(self):
df = cpb.express.combine_express_output(FL, tg=TG)[1]
assert_frame_equal(df, GDF)
def test_missing_values(self):
with pytest.raises(SystemExit):
cpb.express.combine_express_output(
[os.path.join(cpb._root, 'tests', 'express', 'results.a.xprs'),
os.path.join(cpb._root, 'tests', 'express',
'results.missing.xprs')])
def test_names(self):
df = deepcopy(TDF)
df.columns = ['a','b','c']
df2 = cpb.express.combine_express_output(FL, names=['a','b','c'])[0]
assert_frame_equal(df, df2)
df = deepcopy(GDF)
df.columns = ['a','b','c']
df2 = cpb.express.combine_express_output(FL, names=['a','b','c'],
tg=TG)[1]
        assert_frame_equal(df, df2)
from pandas_datareader import data as web
import pandas as pd
import datetime as dt
import numpy as np
import requests
import yfinance as yf
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
class PriceReader:
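    """
    Downloads daily close prices for Brazilian (.SA suffix) and US tickers via yfinance,
    plus benchmark series (IBOV, S&P500, BRL/USD), and exposes simple lookups by date.

    Minimal usage sketch (ticker symbols below are placeholders):
        reader = PriceReader(['PETR4', 'WEGE3'], ['AAPL'], startDate='2020-01-01')
        reader.load()
        last_price = reader.getCurrentValue('AAPL')
    """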
def __init__(self, brTickerList, usTickerList, startDate='2018-01-01'):
self.brTickerList = brTickerList
self.usTickerList = usTickerList
self.startDate = startDate
self.fillDate = dt.datetime.today().strftime('%m-%d-%Y')
self.df = pd.DataFrame(columns=['Date'])
def load(self):
# Read BR market data
if((self.brTickerList != None) and (len(self.brTickerList) > 0)):
self.df = self.readData(self.brTickerList, self.startDate).reset_index()
            self.df.columns = self.df.columns.str.replace(r'\.SA', '', regex=True)
# Read US Market data
if((self.usTickerList != None) and (len(self.usTickerList) > 0)):
self.df = self.df.merge(self.readUSData(self.usTickerList, self.startDate).reset_index(), how='outer', on='Date')
self.df = self.df.set_index('Date').sort_index()
# self.df.to_csv('debug.csv', sep='\t')
indexList = ['^BVSP', '^GSPC', 'BRLUSD=X']
self.brlIndex = self.readUSData(indexList, self.startDate).reset_index()
self.brlIndex.rename(columns={'^BVSP':'IBOV', '^GSPC':'S&P500', 'BRLUSD=X':'USD'}, inplace=True)
self.brlIndex = self.brlIndex.set_index('Date')
# display(self.brlIndex)
def setFillDate(self, date):
self.fillDate = date
def fillCurrentValue(self, row):
row['PRICE'] = self.getCurrentValue(row['SYMBOL'], self.fillDate)
return row
def readData(self, code, startDate='2018-01-01'):
s=''
for c in code:
s += c + '.SA '
tks = yf.Tickers(s)
dfs = tks.history(start=startDate)[['Close']]
dfs.columns = dfs.columns.droplevel()
return dfs
def readUSData(self, code, startDate='2018-01-01'):
s=''
for c in code:
s += c + ' '
tks = yf.Tickers(s)
dfs = tks.history(start=startDate)[['Close']]
dfs.columns = dfs.columns.droplevel()
return dfs
def getHistory(self, code, start='2018-01-01'):
return self.df.loc[start:][code]
def getCurrentValue(self, code, date=None):
if(date == None):
return self.df.iloc[-1][code]
available, date = self.checkLastAvailable(self.df, date, code)
if available:
return self.df.loc[date][code]
return self.df.iloc[0][code]
def getIndexHistory(self, code, end):
ret = self.brlIndex.loc[:end][code]
return ret.dropna()
def getIndexCurrentValue(self, code, date=None):
if(date == None):
return self.brlIndex.iloc[-1][code]
available,date = self.checkLastAvailable(self.brlIndex, date, code)
if available:
return self.brlIndex.loc[date][code]
return self.brlIndex.iloc[0][code]
def checkLastAvailable(self, dtframe, loockDate, field):
date = pd.to_datetime(loockDate)
day = pd.Timedelta(1, unit='d')
#Look for last available date
        while (not (date in dtframe.index)) or pd.isna(dtframe.loc[date][field]):
            date = date - day
            if date < dtframe.index[0]:
                # No earlier data available for this field
                return False, date
        return True, date
# -*- coding: utf-8 -*-
"""
@author: mje
@emai: <EMAIL>
"""
import numpy as np
import mne
import matplotlib.pyplot as plt
import pandas as pd
import itertools
from my_settings import (tf_folder, subjects_test, subjects_ctl, subjects_dir)
plt.style.use("ggplot")
b_df = pd.read_csv("/Volumes/My_Passport/agency_connectivity/results/" +
"behavioural_results.csv")
def calc_ISPC_time_between(data, chan_1=52, chan_2=1):
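    """
    Inter-site phase clustering (ISPC) between two channels, computed per trial as
    the magnitude of the mean phase-difference vector over the analysis window
    [window_start, window_end] (module-level globals defined below).
    """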
result = np.empty([data.shape[0]])
for i in range(data.shape[0]):
result[i] = np.abs(
np.mean(
np.exp(1j * (np.angle(data[i, chan_1, window_start:window_end])
- np.angle(data[i, chan_2, window_start:
window_end])))))
return result
# load labels
labels = mne.read_labels_from_annot(
"fs_p2", parc='selected_lbl', regexp="Bro", subjects_dir=subjects_dir)
label_names = ["sens_motor_lh", "sens_motor_rh", "BA39_lh", "BA39_rh",
"audi_lh", "audi_rh", "BA46_lh", "BA46_rh"]
for j in range(len(labels)):
labels[j].name = label_names[j]
# make combinations of label indices
combinations = []
label_index = [0, 1, 2, 3, 4, 5, 6, 7]
for L in range(0, len(label_index) + 1):
for subset in itertools.combinations(label_index, L):
if len(subset) == 2:
combinations.append(subset)
# make dict with names and indices
label_dict = {}
for comb in combinations:
fname = labels[comb[0]].name + "_" + labels[comb[1]].name
print(fname)
label_dict.update({fname: [comb[0], comb[1]]})
# label_dict = {
# "ba_1_4_r": [1, 52],
# "ba_1_4_l": [0, 51],
# "ba_4_4": [51, 52],
# "ba_1_1": [0, 1],
# "ba_4_39_l": [49, 51],
# "ba_4_39_r": [50, 52],
# "ba_39_39": [49, 50],
# }
bands = ["alpha", "beta", "gamma"]
# bands = ["beta"]
# subjects = ["p9"]
# labels = list(np.load(data_path + "label_names.npy"))
times = np.arange(-2000, 1001, 1.95325)
times = times / 1000.
window_start, window_end = 1024, 1280
results_all = pd.DataFrame()
import datetime
import numpy as np
import pandas as pd
import pytest
from sklearn.exceptions import NotFittedError
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from testfixtures import LogCapture
import greykite.common.constants as cst
from greykite.algo.forecast.silverkite.forecast_silverkite import SilverkiteForecast
from greykite.common.data_loader import DataLoader
from greykite.common.features.timeseries_features import convert_date_to_continuous_time
from greykite.common.python_utils import assert_equal
from greykite.common.testing_utils import daily_data_reg
from greykite.common.testing_utils import generate_df_for_tests
from greykite.common.testing_utils import generate_df_with_reg_for_tests
from greykite.sklearn.estimator.base_silverkite_estimator import BaseSilverkiteEstimator
from greykite.sklearn.estimator.testing_utils import params_components
@pytest.fixture
def params():
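    """Silverkite model parameters (autoregression, uncertainty, seasonality) shared by the tests below."""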
autoreg_dict = {
"lag_dict": {"orders": [7]},
"agg_lag_dict": {
"orders_list": [[7, 7 * 2, 7 * 3]],
"interval_list": [(7, 7 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()}
uncertainty_dict = {
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 5,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}}
return {
"origin_for_time_vars": convert_date_to_continuous_time(datetime.datetime(2018, 1, 3)),
"extra_pred_cols": ["ct1", "regressor1", "regressor2"],
"train_test_thresh": None,
"training_fraction": None,
"fit_algorithm": "sgd",
"fit_algorithm_params": {"alpha": 0.1},
"daily_event_df_dict": None,
"changepoints_dict": None,
"fs_components_df": pd.DataFrame({
"name": ["tow"],
"period": [7.0],
"order": [3],
"seas_names": [None]}),
"autoreg_dict": autoreg_dict,
"min_admissible_value": None,
"max_admissible_value": None,
"uncertainty_dict": uncertainty_dict
}
@pytest.fixture
def daily_data():
return generate_df_for_tests(
freq="D",
periods=1000,
train_start_date=datetime.datetime(2018, 1, 1),
conti_year_origin=2018)
@pytest.fixture
def daily_data_with_reg():
return daily_data_reg()
@pytest.fixture
def X():
periods = 11
return pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=periods, freq="D")
    })
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 09:27:49 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import numpy as np
from country import country
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
from scipy.optimize import brute
from scipy.interpolate import interp1d
from scipy.ndimage.filters import uniform_filter1d
import psutil
from functools import partial
import multiprocessing as mp
from tqdm import tqdm_notebook as tqdm
import pdb
from datetime import date, datetime, timedelta
import time
from pathlib import Path
from matplotlib import pyplot as plt
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.patches as mpatches
import country_converter as coco
import math
import seaborn as sns
# --------------------------------------------------------
# Global variables, chosen cohorts of data and estimates
# --------------------------------------------------------
from param_simple import *
# ----------------------
# Main class
# ----------------------
class solveCovid:
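    """
    Country-level SEIR model building on DELPHI v.3, extended with vaccination,
    a mobility-dependent infection rate gamma_t = gamma_mob(m_t) + gamma_tilde_t,
    and forecast scenarios for policy, vaccines, reinfection and death probabilities.
    Typical pipeline: prelim() -> gamma_t_compute() -> fitmodel() -> sim_seir() -> plot_all().
    """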
def __init__(self,iso2: str): # eg 'US'
self.iso2 = iso2
# Policy strategies for forecast
        self.policy = 'optim' # ['optim', 'constant', 'linear-I', 'linear-dI'] - see mobility_choice()
self.phi_option = 'fit' # ['fit','exo']: Fit phi to latest data or specify as exogenous
self.phi_exo = 2.5e-9 # weight on mobility in social welfare function
self.phi_min = 1e-13 # Lowerbound for phi - authorities care about output
# Infection rate model for forecast
self.gamma_tilde_model = 'AR1' # ['AR1','AR2','shock']
self.gamma_shock_length = 10 # Shock gamma_tilde for x days
self.gamma_shock_depth = 0.5 # Daily increment of gamma
self.default_init_single = default_init_single
self.default_bounds_single = default_bounds_single
# Vaccine assumptions
self.vac_assump = 'vac_base' # Vaccination scenarios: ['vac_base','vac_worse','vac_better']
self.vac_receiver = 'S+R' # Vaccines given to S or S+R? ['S only','S+R']
self.effi_one = 0.5 # Efficacy after one dose in %
self.effi_two = 0.95 # Efficacy after two doses in %
self.target_weight = 0.7 # How targeted vaccine distribution is (1 = sequenced from eldest to youngest, 0 is random)
self.vac_base_cover = 1 # Baseline: (already started): % of effective coverage by December 2021 (to be controlled by country-specific scaling factor below)
self.vac_base_delayedstart = '2021-06-30' # Baseline: (hasn't started): first date of vaccination
self.vac_base_delayedcover = 0.75 # Baseline: (hasn't started): % of contracted dosages deployed by December 2021
self.vac_worse_cover = 0.3 # Worse (started): Use by end of 2021
self.vac_worse_delayedstart = '2021-09-30' # Worse (hasn't started): Starting date
self.vac_worse_delayedcover = 0.3 # Worse (hasn't started): Use by end of 2021
self.vac_better_cover = 1.3
self.vac_better_delayedstart = '2021-06-30'
self.vac_better_delayedcover = 1
# Reinfection and loss of immunity
self.reinfect = 'immune' # ['immune','reinfect']
        self.r_re1_R = np.log(2)/10000   # Baseline: R loses immunity with a half-life of 10,000 days (effectively permanent)
        self.r_re1_V = np.log(2)/10000   # Baseline: V loses immunity with a half-life of 10,000 days (effectively permanent)
self.r_re2_R = np.log(2)/60 # Downside risk: R loses immunity after 60 days, approx 1% of R lose immunity each day
self.r_re2_V = np.log(2)/60 # Downside risk: V loses immunity after 60 days, approx 1% of V lose immunity each day
# Death probabilities
self.pdth_assump = 'martingale' # ['martingale','treatment']
self.pdth_min = 0.005 # Lowerbound on death probability - countries with very few cases still think there is death probability
self.pdth_halflife = 60 # Halflife for treatment case; no. of days it takes to close half the gap of current and assumed minimum death prob
self.pdth_theta = np.exp(-np.log(2)/self.pdth_halflife)
# --------------- 1. Preliminary: Get the data ------------------------
def prelim(self):
iso2 = self.iso2
self.N = df1.fillna(method='ffill')['population'][iso2].iloc[-1]
df2 = df1.iloc[:,df1.columns.get_level_values(1)==iso2][[
'total_cases','total_deaths','new_cases','new_deaths',
'google_smooth','icu_patients','hosp_patients','reproduction_rate',
'new_tests','tests_per_case','aged_70_older',
'vac_total','vac_people',
'vac_fully']][df1['total_cases'][iso2] > virus_thres]
df2 = df2.droplevel('iso2',axis=1)
df2['vac_total'] = df2['vac_total'].interpolate()
df2['vac_people'] = df2['vac_people'].interpolate()
if iso2 == 'AU' or iso2 == 'SA': # Countries with no breakdowns; do manual approximation
df2['vac_partial'] = 0.8 * df2['vac_total']
df2['vac_fully'] = 0.2 * df2['vac_total']
else : # For most countries,
date1 = df2['vac_fully'].first_valid_index() # Next 2 lines fill NA in 'vac_fully', so vac_partial is defined
df2['vac_fully'].iloc[:df2.index.get_loc(date1)-1] = 0
df2['vac_fully'] = df2['vac_fully'].interpolate()
df2['vac_partial'] = df2['vac_people'] - df2['vac_fully']
df2 = df2.fillna(0) # Replace NaN by 0 - deaths and vaccinations
PopulationI = df2['total_cases'][0]
PopulationD = df2['total_deaths'][0]
if PopulationD==0:
PopulationD = 0
PopulationR = 5
else:
PopulationR = PopulationD * 5
PopulationCI = PopulationI - PopulationD - PopulationR # Undetected and infectious cases
self.cases_data_fit = df2['total_cases'].tolist()
self.deaths_data_fit = df2['total_deaths'].tolist()
self.newcases_data_fit = df2['new_cases'].tolist()
self.newdeaths_data_fit = df2['new_deaths'].tolist()
self.balance = self.cases_data_fit[-1] / max(self.deaths_data_fit[-1], 10) / 3
date_day_since100 = pd.to_datetime(df2.index[0])
self.maxT = (default_maxT - date_day_since100).days + 1
self.mobility_vec = df2['google_smooth'].values
self.T = len(df2)
self.t_cases = np.arange(0,self.T)
self.mobility_interp = interp1d(self.t_cases,self.mobility_vec,bounds_error=False,fill_value=0.,kind='cubic')
self.GLOBAL_PARAMS = (self.N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v)
self.gamma_0_days = 1 # average of gamma_t during first n days becomes the target
# Compute vaccination parameters
self.vac_partial = df2['vac_partial'].values
self.vac_fully = df2['vac_fully'].values
#self.vac_contracted = 1000*df_vac.loc[iso2]['No. of people covered (thousands)']/self.N
df2['V_'] = self.N * (self.effi_one*df2['vac_partial']
+ self.effi_two*df2['vac_fully'])/100 # V = expected number of effectively vaccinated persons
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df_v = df2.reindex(ix)
# Vaccination assumptions
if self.iso2 in ['GB','US']:
vac_scale = 1
elif self.iso2 in ['BE','FR','DE','IT','NL','PL','SG','ES','CH','RO','CL','CA']:
vac_scale = 0.8
elif self.iso2 in ['AU','SA','SE','TR']:
vac_scale = 0.65
elif self.iso2 in ['AR','BR','MX','RU']:
vac_scale = 0.50
elif self.iso2 in ['ID','IN','JP','KR','MY','TH']:
vac_scale = 0.25
elif self.iso2 in ['ZA']:
vac_scale = 0.10
else:
vac_scale = 0.50
print('Missing vaccine assumption for selected country')
if self.vac_assump == 'vac_base':
if df2['V_'][-1] > 0: # already started
df_v['V_'].loc['2021-12-31'] = self.vac_base_cover * vac_scale * self.N
elif df2['V_'][-1] == 0: # If has not started, assume starting by xxx and cover xxx at year end
df_v['V_'].loc[self.vac_base_delayedstart] = 100 # 100 = assumed number of effectively vaccinated on first day
df_v['V_'].loc['2021-12-31'] = self.vac_base_delayedcover* vac_scale*self.N # partial orders filled by year end
elif self.vac_assump == 'vac_worse':
if df2['V_'][-1] > 0:
df_v['V_'].loc['2021-12-31'] = self.vac_worse_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_worse_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_worse_delayedcover* vac_scale*self.N
elif self.vac_assump == 'vac_better':
if df2['V_'][-1]>0:
df_v['V_'].loc['2021-12-31'] = self.vac_better_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_better_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_better_delayedcover* vac_scale*self.N
df_v['V_'] = df_v['V_'].interpolate()
df_v['V_'] = df_v['V_'].clip(0,self.N)
self.df2 = df2
self.df_v = df_v
print(f'Data preparation for {iso2} done')
# --------------------------3 . SEIR model ------------------
def step_seir(self, t, x, gamma_t, p_dth) -> list:
"""
SEIR model building on DELPHI v.3
Features 16 distinct states, taking into account undetected, deaths, hospitalized and
recovered
[0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D,
11 TH, 12 DVR,13 DVD, 14 DD, 15 DT, 16 V]
"""
S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT, V = x
r_v = self.df_v['V_'].iloc[t+1] - self.df_v['V_'].iloc[t]
# Reinfection parameters
if self.reinfect == 'immune':
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
elif self.reinfect == 'reinfect':
if t <= self.T:
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
else:
r_re_R = self.r_re2_R
r_re_V = self.r_re2_V
# Vaccination recipients (S, or S+R)
if self.vac_receiver == 'S only':
zeta = 1
elif self.vac_receiver == 'S+R':
zeta = S/(S+R)
else:
print('Re-specify vaccine recipient choice')
# Main equations
S1 = S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V - r_v * zeta
if S1 < 0: # Vaccination reaches saturating point
S1 = 0
r_v = (S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V) /zeta
E1 = E + gamma_t * S * I / self.N - r_i * E
I1 = I + r_i * E - r_d * I
AR1 = AR + r_d * (1 - p_dth) * (1 - p_d) * I - r_ri * AR
DHR1 = DHR + r_d * (1 - p_dth) * p_d * p_h * I - r_rh * DHR
DQR1 = DQR + r_d * (1 - p_dth) * p_d * (1 - p_h) * I - r_ri * DQR
AD1 = AD + r_d * p_dth * (1 - p_d) * I - r_dth * AD
DHD1 = DHD + r_d * p_dth * p_d * p_h * I - r_dth * DHD
DQD1 = DQD + r_d * p_dth * p_d * (1 - p_h) * I - r_dth * DQD
R1 = R + r_ri * (AR + DQR) + r_rh * DHR - r_re_R*R - r_v * (1-zeta)
D1 = D + r_dth * (AD + DQD + DHD)
# Helper states
TH1 = TH + r_d * p_d * p_h * I
DVR1 = DVR + r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
DVD1 = DVD + r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
DD1 = DD + r_dth * (DHD + DQD)
DT1 = DT + r_d * p_d * I
V1 = V + r_v -r_re_V*V
x1 = [S1, E1, I1, AR1, DHR1, DQR1, AD1, DHD1, DQD1,
R1, D1, TH1, DVR1, DVD1, DD1, DT1, V1]
return x1
# ------------------ X. Construct initial conditions
def initial_states_func(self,k):
N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = self.GLOBAL_PARAMS
p_dth0 = self.newdeaths_data_fit[0]/(r_dth*PopulationCI) # Set p_dth0 to match D1-D0 to newdeaths_data_fit
E_0 = PopulationCI / p_d * k
I_0 = PopulationCI / p_d * k
UR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth0)
DHR_0 = (PopulationCI * p_h) * (1 - p_dth0)
DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth0)
UD_0 = (PopulationCI / p_d - PopulationCI) * p_dth0
DHD_0 = PopulationCI * p_h * p_dth0
DQD_0 = PopulationCI * (1 - p_h) * p_dth0
R_0 = PopulationR / p_d
D_0 = PopulationD / p_d
S_0 = N - (E_0 +I_0 +UR_0 +DHR_0 +DQR_0 +UD_0 +DHD_0 +DQD_0 +R_0 +D_0)
TH_0 = PopulationCI * p_h
DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth0)
DVD_0 = (PopulationCI * p_h * p_v) * p_dth0
DD_0 = PopulationD
DT_0 = PopulationI
V_0 = 0
x_init = [
S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0
]
return x_init
# Find k=k1,k2 that matches gamma_0 to 2.08 (R0=6 equivalent)
def loss_gamma0(self,k):
newcases = np.array(self.newcases_data_fit)
newdeaths = np.array(self.newdeaths_data_fit)
newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
gamma_t_vec = []
x_init = self.initial_states_func(k)
(S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0) = x_init
newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
x_0 = x_init.copy()
for t in range(self.gamma_0_days): # Target first n days
gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
gamma_t = np.clip(gamma_t, 0.01, 10)
p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
x_0 = x_1
gamma_t_vec.append(gamma_t)
gamma_0 = np.mean(gamma_t_vec)
loss = (gamma_0 - (r_d*6) )**2 # gamma_0 equivalent to R0=6 is 2.08
return loss
def fit_gamma0(self):
output = dual_annealing(
self.loss_gamma0,
x0 = [5],
bounds = [(1,50)],
)
k_star = output.x
return k_star
def get_initial_conditions(self):
if Path(f'../params/param_fixed/kstar.csv').exists():
df = pd.read_csv(f'../params/param_fixed/kstar.csv')
kstar = df[self.iso2].values[0]
else:
kstar = self.fit_gamma0()[0] # find kstar that matches gamma_0 to target
x_init = self.initial_states_func(kstar)
return x_init
# -------------------- x. Implied gamma_t and pdth_t in-sample -------------------
def gamma_t_compute(self):
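        """
        Backs out the implied infection rate gamma_t and death probability pdth_t day by day,
        by inverting the SEIR recursions so the model path reproduces smoothed new cases and deaths.
        Stores them in self.df2 and keeps the in-sample state paths needed later (S_vec, I_vec, ...).
        """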
newcases = np.array(self.newcases_data_fit)
newdeaths = np.array(self.newdeaths_data_fit)
newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
gamma_t_vec = []
p_dth_vec = []
x_init = self.get_initial_conditions()
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_init
S_vec = [S_0]
E_vec = [E_0]
I_vec = [I_0]
DT_vec = [DT_0]
DD_vec = [DD_0]
DHR_vec = [DHR_0]
DHD_vec = [DHD_0]
newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
x_0 = x_init.copy()
for t in range(len(newcases)):
# Work backwards to compute 'exact' gamma_t and p_dth
gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
gamma_t = np.clip(gamma_t, 0.01, 10)
p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_1
x_0 = x_1
gamma_t_vec.append(gamma_t)
p_dth_vec.append(p_dth)
S_vec.append(S_0)
I_vec.append(I_0)
E_vec.append(E_0)
DT_vec.append(DT_0)
DD_vec.append(DD_0)
DHR_vec.append(DHR_0)
DHD_vec.append(DHD_0)
self.df2['gamma_t'] = gamma_t_vec
self.df2['pdth_t'] = p_dth_vec
        self.S_vec = S_vec # In-sample estimates, useful for phi calculation later on
self.I_vec = I_vec
self.DHR_vec = DHR_vec # For fitting death probability
self.DHD_vec = DHD_vec
HD_HR = np.array(self.DHR_vec) + np.array(self.DHD_vec)
self.df2['HD_HR'] = 100*HD_HR[:-1]/self.N
# gamma_t_sm = uniform_filter1d(gamma_t_vec, size=6, mode='nearest')
# self.df2['gamma_sm'] = gamma_t_sm
return gamma_t_vec, p_dth_vec
# -------------------- x. Estimating the model -----------
def gamma_func(self, params):
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(m_t))
beta0, beta1 = params
gamma_vec = beta0*np.exp(beta1* m_t)
return gamma_vec
def loss_betas(self, params) -> float:
gamma_model = self.gamma_func(params)
loss = sum( (self.df2['gamma_t'].values[:len(gamma_model)] - gamma_model)**2 )
return loss
def fitmodel(self):
# A. Fit beta0 and beta1
x0 = self.default_init_single
bounds_0 = self.default_bounds_single
output = dual_annealing(
self.loss_betas,
x0 = x0,
bounds = bounds_0,
)
best_betas = output.x
self.best_betas = best_betas
# B. Fit the residual (gamma_tilde) to AR models
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(self.df2))
beta0, beta1 = self.best_betas
self.df2['gamma_mob'] = beta0*np.exp(beta1* m_t)
self.df2['gamma_tilde'] = self.df2['gamma_t'] - self.df2['gamma_mob']
self.df2['gamma_tilde_sm'] = uniform_filter1d(self.df2['gamma_tilde'],
size=21, mode='reflect')
self.df2['gamma_tilde_resid'] = self.df2['gamma_tilde'] - self.df2['gamma_tilde_sm']
y = self.df2['gamma_tilde_sm']
self.df2['gamma_tilde_sm_lag1'] = self.df2['gamma_tilde_sm'].shift(1) # No constant term
self.df2['gamma_tilde_sm_lag2'] = self.df2['gamma_tilde_sm'].shift(2)
reg_AR1 = sm.OLS(y,self.df2['gamma_tilde_sm_lag1'],missing='drop').fit()
reg_AR2 = sm.OLS(y,self.df2[['gamma_tilde_sm_lag1','gamma_tilde_sm_lag2']],missing='drop').fit()
best_rho1 = reg_AR1.params[0]
best_rho1 = np.clip(best_rho1, 0.1, 0.99) #Assume stationarity
best_rho2 = reg_AR2.params[:]
best_params = np.array([beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]])
self.best_rho1 = best_rho1
self.best_rho2 = best_rho2
self.best_params = best_params
# C. Empirically fit phi for optimal policy to last observation
if self.phi_option == 'fit':
m = self.df2['google_smooth'][-15:].mean() # Take average of last 15 days to smooth volatility
s = self.S_vec[-1]/self.N
i = self.I_vec[-1]/self.N
gamma_tilde = self.df2['gamma_tilde'][-1]
pdth = self.df2['pdth_t'][-1]
pdth = max(pdth, self.pdth_min) # Get around cases where pdth=0 for countries with very few cases
LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m))
LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m)))
phi = -(LHS1 * LHS2)/m
self.phi = max(phi, self.phi_min)
elif self.phi_option == 'exo':
self.phi = self.phi_exo
return best_params
# ------------------ x. Forecasts ---------------------------
def step_gamma_tilde(self, gamma_tilde_lag1, gamma_tilde_lag2, model='AR1'):
if model =='AR1':
return self.best_rho1*gamma_tilde_lag1
elif model =='AR2':
return self.best_rho2[0]*gamma_tilde_lag1 + self.best_rho2[1]*gamma_tilde_lag2
def mobility_choice(self,x,gamma_tilde,pdth):
if self.policy == 'constant':
mob = self.poparam_constant
elif self.policy == 'linear-I': # Respond linearly to infection level
mob = self.poparam_linear_I[0] + self.poparam_linear_I[1]*x[2]
elif self.policy == 'linear-dI': # Respond to new infections
dI = r_i*x[1] - r_d*x[2] # x[1]=E, x[2]=I
mob = self.poparam_linear_dI[0] + self.poparam_linear_dI[1]*dI
elif self.policy == 'optim': # Analytical optimal policy based on simplified model and quadratic losses
beta0 = self.best_params[0]
beta1 = self.best_params[1]
phi = self.phi
s = x[0]/self.N
i = x[2]/self.N
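            # Grid-search the first-order condition: pick m in [-1, 0] where the marginal
            # activity cost phi*|m| (RHS) comes closest to the marginal reduction in deaths (LHS).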
m_set = np.linspace(-1,0,101)
RHS = -phi*m_set
LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m_set))
LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m_set)))
LHS = LHS1 * LHS2
m_id = np.argmin(np.abs(RHS-LHS))
mob = m_set[m_id]
return mob
def fatality_factor(self,V): # Factor to adjust 'base' fatality prob
idx = (f_table[self.iso2]['vaccine_%'] - V/self.N).abs().argmin() # Find idx to look up in fatality table
factor = f_table[self.iso2]['fatality_ratio'][idx]
return factor
def sim_seir(self):
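        """
        Simulates the SEIR system forward to default_maxT: in sample it feeds the 'true'
        gamma_t and pdth_t; out of sample it uses the fitted AR process for gamma_tilde,
        the chosen mobility policy and the vaccine-adjusted death probability.
        Returns df3 with state paths and forecast inputs.
        """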
df2 = self.df2
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df3 = df2.reindex(ix)
x_init = self.get_initial_conditions()
x_data = np.array(x_init)
gamma_tilde_fc = self.df2['gamma_tilde'].values
gamma_tilde_sm_fc = self.df2['gamma_tilde_sm'].values
pdth_t_targ = [] # Death prob when vaccines are targeted
pdth_t_base = [] # Base death prob if vaccines are given randomly
pdth_t_fc = self.df2['pdth_t'].values
pdth_t_base_fc = pdth_t_fc.copy()
gamma_mob_fc = self.df2['gamma_mob'].values
mob_fc = self.df2['google_smooth'].values
# Load parameters
if hasattr(self, 'best_params'):
beta0, beta1, rho, rhos_1, rhos_2 = self.best_params
else:
df_param = pd.read_csv(f'../params/{param_load_folder}/param_est.csv')
beta0, beta1, rho, rhos_1, rhos_2 = df_param[self.iso2]
for t in range(self.maxT):
factor = self.fatality_factor(x_init[-1])
eta = self.target_weight
if t<len(self.df2): # In sample
pdth_t = pdth_t_fc[t]
pdth_base = pdth_t/(eta*factor + 1-eta)
pdth_targ = factor*pdth_base
# if t==len(self.df2): # Parse pdth_base of hospitalised/N
# y = pdth_t_base
# X = self.df2['HD_HR'].shift(30) # Use lagged hospitalised as the predictor
# X = sm.add_constant(X)
# reg_pdth = sm.OLS(y,X, missing='drop').fit()
# thetas = reg_pdth.params
# self.best_theta = thetas
# pdb.set_trace()
# pdth_t_basex = y - thetas[0] - thetas[1]*X # Base death prob, parsed of hospitalisation wave
# self.df2['pdth_base'] = pdth_t_base
# self.df2['pdth_base_x'] = pdth_t_basex
if t>len(self.df2)-1: # Out of sample
# Death probability
if self.pdth_assump == 'martingale': # Martingale death rate
pdth_base = pdth_t_base[-1]
elif self.pdth_assump == 'treatment': # Death prob slowly declines to assumed minimum and assumed halflife
pdth_base = self.pdth_theta*pdth_t_base[-1] + (1-self.pdth_theta)*self.pdth_min
pdth_base = max(pdth_base, self.pdth_min) # To get around pdth=0 for countries with very few cases
pdth_t = (eta*factor + 1-eta)*pdth_base
pdth_targ = factor*pdth_base
# Gamma_tilde
if self.gamma_tilde_model == 'AR1':
gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
elif self.gamma_tilde_model == 'AR2':
gamma_tilde = rhos_1*gamma_tilde_sm_fc[t-1] + rhos_2*gamma_tilde_sm_fc[t-2]
elif self.gamma_tilde_model =='shock':
if t < len(self.df2) + self.gamma_shock_length:
gamma_tilde = gamma_tilde_sm_fc[len(self.df2)-1] + self.gamma_shock_depth
else:
gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
# Mobility and overall gamma_t
mob_t = self.mobility_choice(x_init, gamma_tilde, pdth_t)
mob_t = max(mob_t, max_lockdown)
gamma_mob_t = beta0*np.exp(beta1*mob_t)
gamma_t = gamma_tilde + gamma_mob_t
# Append to data array
gamma_tilde_sm_fc = np.append(gamma_tilde_sm_fc, gamma_tilde)
gamma_tilde_fc = np.append(gamma_tilde_fc, gamma_tilde)
gamma_mob_fc = np.append(gamma_mob_fc, gamma_mob_t)
mob_fc = np.append(mob_fc, mob_t)
pdth_t_fc = np.append(pdth_t_fc, pdth_t)
pdth_t_base.append(pdth_base)
pdth_t_targ.append(pdth_targ)
# For in sample, use 'true' inputs
gamma_t = gamma_tilde_fc[t] + gamma_mob_fc[t]
p_dth = pdth_t_fc[t]
if t < range(self.maxT)[-1]: # Stop forecasting at the final period
x_next = self.step_seir(t, x_init, gamma_t, p_dth)
x_data = np.vstack((x_data, np.array(x_next)))
x_init = x_next
# Fill dataframe
col_temp = ['S', 'E', 'I', 'AR', 'DHR', 'DQR', 'AD', 'DHD', 'DQD', 'R', 'D', 'TH', 'DVR', 'DVD', 'DD', 'DT', 'V']
df4 = pd.DataFrame(x_data, columns=col_temp, index=df3.index)
df3 = df3.merge(df4, how='left', left_index=True, right_index=True)
df3['gamma_tilde_fc'] = gamma_tilde_fc
df3['gamma_mob_fc'] = gamma_mob_fc
df3['gamma_t_fc'] = df3['gamma_tilde_fc'] + df3['gamma_mob_fc']
df3['mob_fc'] = mob_fc
df3['pdth_t_fc'] = pdth_t_fc
df3['pdth_t_base'] = np.array(pdth_t_base)
df3['pdth_t_targ'] = np.array(pdth_t_targ)
df3[['S_N','I_N','DT_N','DD_N','V_N']] = df3[['S','I','DT','DD','V']]/self.N
self.df3 = df3
return df3
# ------------------ 5. Predict and plot ---------------------
def plot_all(self, saveplot=False):
df = self.df3
transpa = 0.0
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15,8), constrained_layout=True)
# df_bar = df_bar0[['GDP lost','Total deaths']]
# df_bar.plot(kind='bar', ax=ax[1,2], secondary_y='Total deaths', rot=0, legend=False)
# ax[1,2].set_ylabel('percent')
# ax[1,2].right_ax.set_ylabel('per million')
# ax[1,2].set_title('Losses of lives and output',fontsize='x-large')
# L = [mpatches.Patch(color=c, label=col)
# for col,c in zip( ('GDP loss','Deaths (rhs)'), plt.rcParams['axes.prop_cycle'].by_key()['color'])]
# ax[1,2] = plt.legend(handles=L, loc=1, framealpha=transpa)
ax[0,0].plot(df.index, 100*df['total_cases']/self.N, linewidth = 3, label='Case data', color='blue')
ax[0,0].plot(df.index, 100*df['DT']/self.N, label='$DT_t$', color='red')
ax[0,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,0].set_title('Cases',fontsize='x-large')
ax[0,0].set(ylabel = '% of population')
ax2 = ax[0,0].twinx()
ax2.plot(df.index, 100*df['I']/self.N, label='$I_t$ (rhs)',color='green',linestyle='--')
lines, labels = ax[0,0].get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='center right', framealpha=transpa,fontsize='x-large')
#ax2.set(ylabel='% of population')
ax[0,1].plot(df.index, 100*df['total_deaths']/self.N, linewidth = 3, label='Death data', color='blue')
ax[0,1].plot(df.index, 100*df['DD']/self.N, label='$DD_t$', color='red')
ax[0,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,1].set_title('Deaths',fontsize='x-large')
ax[0,1].set(ylabel='% of population')
ax[0,1].legend(loc='best', framealpha=transpa ,fontsize='x-large')
ax[0,2].plot(df.index, 100*df['S']/self.N, label='$S_t$',color='red')
ax[0,2].plot(df.index, 100*df['V']/self.N, label='$V_t$',color='red',linestyle=':')
ax[0,2].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,2].set_title('Susceptible & vaccinated',fontsize='x-large')
ax[0,2].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[0,2].set(ylabel='% of population')
ax[1,0].plot(df.index, df['gamma_t'], label=r'$\gamma_t$',color='red')
ax[1,0].plot(df.index, df['gamma_mob'], label=r'$\gamma^{m}_t$', color ='blue')
ax[1,0].plot(df.index, df['gamma_tilde'], label=r'$\gamma^{d}$', color='orange')
ax[1,0].plot(df.index, df['gamma_t_fc'], color='red',linestyle=':')
ax[1,0].plot(df.index, df['gamma_mob_fc'], color ='blue',linestyle=':')
ax[1,0].plot(df.index, df['gamma_tilde_fc'], color='orange',linestyle=':')
ax[1,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,0].set_title('Infection rate',fontsize='x-large')
ax[1,0].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[1,1].plot(df.index, 100*df['google_smooth'], linewidth = 3, label='Google mobility', color='blue')
ax[1,1].plot(df.index, 100*df['mob_fc'], label='Model', color='red')
ax[1,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,1].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[1,1].set_title('Activity',fontsize='x-large')
ax[1,1].set(ylabel='% deviations from norm')
ax[1,2].plot(df.index, 100*df['pdth_t'], label='Death probability', linewidth=3, color='blue')
ax[1,2].plot(df.index, 100*df['pdth_t_fc'], color='black', label='Forecast')
ax[1,2].plot(df.index, 100*df['pdth_t_base'], color='black', linestyle='dashed', label='Random vaccines')
ax[1,2].plot(df.index, 100*df['pdth_t_targ'], color='black', linestyle=':', label='Targeted vaccines')
ax[1,2].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,2].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[1,2].set_title('Death probability',fontsize='x-large')
ax[1,2].set(ylabel='%')
plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[0,2].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,2].get_xticklabels(), rotation=30, horizontalalignment='right')
cname = coco.convert(names=self.iso2,to='name_short')
fig.suptitle(f'{cname}-{self.vac_assump}-{self.reinfect}',fontsize='xx-large')
if saveplot:
Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
fig.savefig(f'../pics/fig_{date.today()}/{self.iso2}-{self.policy}-{self.gamma_tilde_model}-{self.vac_assump}-{self.reinfect}.png')
return fig
def plot_portrait(self, saveplot=False):
df = self.df3
transpa = 0.0
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(10,12), constrained_layout=True)
ax[0,0].plot(df.index, 100*df['total_cases']/self.N, linewidth = 3, label='Case data', color='blue')
ax[0,0].plot(df.index, 100*df['DT']/self.N, label='$DT_t$', color='red')
ax[0,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,0].set_title('Cases',fontsize='x-large')
ax[0,0].set(ylabel = '% of population')
ax2 = ax[0,0].twinx()
ax2.plot(df.index, 100*df['I']/self.N, label='$I_t$ (rhs)',color='green',linestyle='--')
ax2.grid(None)
lines, labels = ax[0,0].get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='center right', framealpha=transpa,fontsize='x-large')
#ax2.set(ylabel='% of population')
ax[0,1].plot(df.index, 100*df['total_deaths']/self.N, linewidth = 3, label='Death data', color='blue')
ax[0,1].plot(df.index, 100*df['DD']/self.N, label='$DD_t$', color='red')
ax[0,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[0,1].set_title('Deaths',fontsize='x-large')
ax[0,1].set(ylabel='% of population')
ax[0,1].legend(loc='best', framealpha=transpa ,fontsize='x-large')
ax[1,0].plot(df.index, 100*df['S']/self.N, label='$S_t$',color='red')
ax[1,0].plot(df.index, 100*df['V']/self.N, label='$V_t$',color='red',linestyle=':')
ax[1,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,0].set_title('Susceptible & vaccinated',fontsize='x-large')
ax[1,0].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[1,0].set(ylabel='% of population')
ax[1,1].plot(df.index, df['gamma_t'], label=r'$\gamma_t$',color='red')
ax[1,1].plot(df.index, df['gamma_mob'], label=r'$\gamma^{m}_t$', color ='blue')
ax[1,1].plot(df.index, df['gamma_tilde'], label=r'$\gamma^{d}$', color='orange')
ax[1,1].plot(df.index, df['gamma_t_fc'], color='red',linestyle=':')
ax[1,1].plot(df.index, df['gamma_mob_fc'], color ='blue',linestyle=':')
ax[1,1].plot(df.index, df['gamma_tilde_fc'], color='orange',linestyle=':')
ax[1,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[1,1].set_title('Infection rate',fontsize='x-large')
ax[1,1].legend(loc='best',framealpha=transpa ,fontsize='x-large')
ax[2,0].plot(df.index, 100*df['google_smooth'], linewidth = 3, label='Google mobility', color='blue')
ax[2,0].plot(df.index, 100*df['mob_fc'], label='Model', color='red')
ax[2,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[2,0].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[2,0].set_title('Mobility',fontsize='x-large')
ax[2,0].set(ylabel='% deviations from norm')
ax[2,1].plot(df.index, 100*df['pdth_t'], label='Death probability', linewidth=3, color='blue')
ax[2,1].plot(df.index, 100*df['pdth_t_fc'], color='black', label='Forecast')
ax[2,1].plot(df.index, 100*df['pdth_t_base'], color='black', linestyle='dashed', label='Random vaccines')
ax[2,1].plot(df.index, 100*df['pdth_t_targ'], color='black', linestyle=':', label='Targeted vaccines')
ax[2,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
ax[2,1].legend(loc=0,framealpha=transpa ,fontsize='x-large')
ax[2,1].set_title('Death probability',fontsize='x-large')
ax[2,1].set(ylabel='%')
plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[2,0].get_xticklabels(), rotation=30, horizontalalignment='right')
plt.setp(ax[2,1].get_xticklabels(), rotation=30, horizontalalignment='right')
cname = coco.convert(names=self.iso2,to='name_short')
fig.suptitle(f'{cname}',fontsize=18)
if saveplot:
Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
fig.savefig(f'../pics/fig_{date.today()}/Portrait-{self.iso2}-{self.policy}-{self.gamma_tilde_model}-{self.vac_assump}-{self.reinfect}.pdf')
return fig
# ---------------------------------------------
# Calling functions
# ---------------------------------------------
# -----------------------------------------
# x. Prelim parameters estimation
# Estimate k_star and save in file (only need to do this once)
def estimate_kstar(cset=['US']):
dict = {'Parameter': ['kstar']}
for c in cset:
tmp = solveCovid(c)
tmp.prelim()
kstar = tmp.fit_gamma0()
dict[c] = kstar
df = pd.DataFrame(dict)
df.to_csv(f'../params/param_fixed/kstar.csv',index=False)
return df
# -------------------------
# x. Run complete package under scenarios: estimate, forecast, plot, save
def run_baseline(cset=['US']):
p_dict = {'Parameters': ['beta0','beta1','rho','rhos_1','rhos_2','phi']}
for c in cset:
tmp = solveCovid(c)
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
p_dict[c] = np.append(tmp.best_params, 1e9*tmp.phi)
tmp.sim_seir()
tmp.plot_all(saveplot='False')
tmp.df3.to_csv(f'../output/{out_save_folder}/df3_{tmp.iso2}.csv')
pd.DataFrame(p_dict).to_csv(f'../params/{param_save_folder}/param_est.csv',float_format='%.4f',index=False)
def run_gammashock(cset=['US']):
for c in cset:
tmp = solveCovid(c)
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
tmp.gamma_tilde_model = 'shock'
tmp.sim_seir()
tmp.plot_all(saveplot=True)
def run_vaccines(cset=['US'],vac_assump='vac_worse'):
for c in cset:
tmp = solveCovid(c)
tmp.vac_assump = vac_assump
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
tmp.sim_seir()
tmp.plot_all(saveplot=True)
def run_reinfect(cset=['US'],reinfect = 'reinfect'):
for c in cset:
tmp = solveCovid(c)
tmp.reinfect = reinfect
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
tmp.sim_seir()
tmp.plot_all(saveplot=True)
def run_scenarios(cset=['US']): # Save class objects under various scenarios so we could draw plots across countries/scenarios
p_dict = {'Parameters': ['beta0','beta1','rho','rhos_1','rhos_2','phi']}
for c in cset:
#Baseline
tmp = solveCovid(c)
tmp.prelim()
tmp.gamma_t_compute()
tmp.fitmodel()
p_dict[c] = np.append(tmp.best_params, 1e9*tmp.phi)
tmp.sim_seir()
tmp.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_baseline.pkl'
pickle.dump(tmp,open(name,'wb'))
# Vaccines
t_vac = solveCovid(c)
t_vac.vac_assump = 'vac_worse'
t_vac.prelim()
t_vac.gamma_t_compute()
t_vac.fitmodel()
t_vac.sim_seir()
t_vac.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_vacworse.pkl'
pickle.dump(t_vac,open(name,'wb'))
# Spikes
t_spike = solveCovid(c)
t_spike.prelim()
t_spike.gamma_t_compute()
t_spike.fitmodel()
t_spike.gamma_tilde_model = 'shock'
t_spike.sim_seir()
t_spike.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_shock.pkl'
pickle.dump(t_spike,open(name,'wb'))
# Reinfection
t_reinfect = solveCovid(c)
t_reinfect.reinfect = 'reinfect'
t_reinfect.prelim()
t_reinfect.gamma_t_compute()
t_reinfect.fitmodel()
t_reinfect.sim_seir()
t_reinfect.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_reinfect.pkl'
pickle.dump(t_reinfect,open(name,'wb'))
# Better
t_better = solveCovid(c)
t_better.vac_assump = 'vac_better' # (a) 30% Faster vaccines
t_better.target_weight = 0.9 # (b) More targeted
t_better.prelim()
t_better.gamma_t_compute()
t_better.fitmodel()
t_better.sim_seir()
t_better.plot_all(saveplot=True)
name = f'../output/{out_save_folder}/{c}_better.pkl'
pickle.dump(t_better,open(name,'wb'))
pd.DataFrame(p_dict).to_csv(f'../params/{param_save_folder}/param_est.csv',float_format='%.4f',index=False)
def save_results(cset=['US']): # Unpack pickle and save all results into an excel
with pd.ExcelWriter(f'../output/{out_save_folder}/output_all.xlsx') as writer:
for c in cset:
print(f'Loading pickle for {c}')
tmp = pickle.load(open(f'../output/{out_load_folder}/{c}_baseline.pkl','rb'))
t_vac = pickle.load(open(f'../output/{out_load_folder}/{c}_vacworse.pkl','rb'))
t_spike = pickle.load(open(f'../output/{out_load_folder}/{c}_shock.pkl','rb'))
t_reinfect = pickle.load(open(f'../output/{out_load_folder}/{c}_reinfect.pkl','rb'))
#t_better = pickle.load(open(f'../output/{out_load_folder}/{c}_better.pkl','rb'))
tmp.df3.to_excel(writer, sheet_name=f'{c}_base')
t_vac.df3.to_excel(writer, sheet_name=f'{c}_vacworse')
t_spike.df3.to_excel(writer, sheet_name=f'{c}_shock')
t_reinfect.df3.to_excel(writer, sheet_name=f'{c}_reinfect')
#t_better.df3.to_excel(writer, sheet_name=f'{c}_better')
# ---------------------------------------------------
# x. Plotting functions
# ***** Utilities *****
def scatter1(x,y,xlab,ylab,df):
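    """Labelled scatter of df[x] vs df[y] (index values as point labels) with a fitted OLS line."""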
x1 = df[x]
y1 = df[y]
fig, ax = plt.subplots(figsize=(10,8))
ax.scatter(x1,y1,marker='o',facecolors='none', edgecolors='none')
for i, label in enumerate(df.index):
ax.annotate(label, (x1.iloc[i], y1.iloc[i]), size=16)
ax.plot(np.unique(x1),
np.poly1d(np.polyfit(x1, y1, 1))(np.unique(x1)),
color='black')
ax.set_xlabel(xlab,size=20)
ax.set_ylabel(ylab,size=20)
plt.xticks(fontsize= 20)
plt.yticks(fontsize= 20)
return fig, ax
def scatter2(x,y,x2,y2,xlab,ylab,df):
x1 = df[x]
y1 = df[y]
x2 = df[x2]
y2 = df[y2]
fig, ax = plt.subplots(figsize=(10,8))
ax.scatter(x1,y1,marker='o',facecolors='none', edgecolors='none')
for i, label in enumerate(df.index):
ax.annotate(label, (x1.iloc[i], y1.iloc[i]), size=16, color='gray')
ax.plot(np.unique(x1),
np.poly1d(np.polyfit(x1, y1, 1))(np.unique(x1)),
color='gray')
ax.set_xlabel(xlab,size=20)
ax.set_ylabel(ylab,size=20)
# Super impose with a new set
ax.scatter(x2,y2,marker='o',facecolors='none', edgecolors='none')
for i, label in enumerate(df.index):
ax.annotate(label, (x2.iloc[i], y2.iloc[i]), size=16, color='blue')
ax.plot(np.unique(x2),
np.poly1d(np.polyfit(x2, y2, 1))(np.unique(x2)),
color='blue')
ax.set_xlabel(xlab,size=20)
ax.set_ylabel(ylab,size=20)
plt.xticks(fontsize= 20)
plt.yticks(fontsize= 20)
return fig, ax
def all_output(cset=['US','DE']):
data_col = ['Mob 2021','Mob fc',
'GDP 2021','GDP fc',
'dDeath 2021','dDeath fc',
'dD/mn 2021','dD/mn fc',
'Mob 2021 3rdwave', 'Mob fc 3rdwave',
'GDP 2021 3rdwave', 'GDP fc 3rdwave',
'dDeath 2021 3rdwave', 'dDeath fc 3rdwave',
'dD/mn 2021 3rdwave', 'dD/mn fc 3rdwave',
'Mob 2021 vacworse', 'Mob fc vacworse',
'GDP 2021 vacworse', 'GDP fc vacworse',
'dDeath 2021 vacworse', 'dDeath fc vacworse',
'dD/mn 2021 vacworse', 'dD/mn fc vacworse',
'Mob 2021 reinfect', 'Mob fc reinfect',
'GDP 2021 reinfect', 'GDP fc reinfect',
'dDeath 2021 reinfect', 'dDeath fc reinfect',
'dD/mn 2021 reinfect', 'dD/mn fc reinfect',
# 'Mob 2021 better', 'Mob fc better',
# 'GDP 2021 better', 'GDP fc better',
# 'dDeath 2021 better', 'dDeath fc better',
# 'dD/mn 2021 better', 'dD/mn fc better',
]
data = {}
df_yratio = pd.read_csv(f'../output/growth-mob.csv', index_col=0)
for c in cset:
tmp = pickle.load(open(f'../output/{out_load_folder}/{c}_baseline.pkl','rb'))
tmp1 = pickle.load(open(f'../output/{out_load_folder}/{c}_shock.pkl','rb'))
tmp2 = pickle.load(open(f'../output/{out_load_folder}/{c}_vacworse.pkl','rb'))
tmp3 = pickle.load(open(f'../output/{out_load_folder}/{c}_reinfect.pkl','rb'))
# tmp4 = pickle.load(open(f'../output/{out_load_folder}/{c}_better.pkl','rb'))
cnum = tmp.df3.index.get_loc('2020-12-31')+1
d = tmp.df3['total_cases'].last_valid_index()
dnum = tmp.df3.index.get_loc(d)+1
mob_2021 = tmp.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc = tmp.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021 = 100*mob_2021*df_yratio.loc[c]['ym_ratio']
GDP_fc = 100*mob_fc*df_yratio.loc[c]['ym_ratio']
dD_2021 = tmp.df3['DD'][-1] - tmp.df3['DD'][cnum]
dD_fc = tmp.df3['DD'][-1] - tmp.df3['DD'][dnum]
dD_mn_2021 = 1000000*dD_2021/tmp.N
dD_mn_fc = 1000000*dD_fc/tmp.N
mob_2021_shock = tmp1.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc_shock = tmp1.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021_shock = 100*mob_2021_shock*df_yratio.loc[c]['ym_ratio']
GDP_fc_shock = 100*mob_fc_shock*df_yratio.loc[c]['ym_ratio']
dD_2021_shock = tmp1.df3['DD'][-1] - tmp1.df3['DD'][cnum]
dD_fc_shock = tmp1.df3['DD'][-1] - tmp1.df3['DD'][dnum]
dD_mn_2021_shock = 1000000*dD_2021_shock/tmp.N
dD_mn_fc_shock = 1000000*dD_fc_shock/tmp.N
mob_2021_vacworse = tmp2.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc_vacworse = tmp2.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021_vacworse = 100*mob_2021_vacworse*df_yratio.loc[c]['ym_ratio']
GDP_fc_vacworse = 100*mob_fc_vacworse*df_yratio.loc[c]['ym_ratio']
dD_2021_vacworse = tmp2.df3['DD'][-1] - tmp2.df3['DD'][cnum]
dD_fc_vacworse = tmp2.df3['DD'][-1] - tmp2.df3['DD'][dnum]
dD_mn_2021_vacworse = 1000000*dD_2021_vacworse/tmp.N
dD_mn_fc_vacworse = 1000000*dD_fc_vacworse/tmp.N
mob_2021_reinfect = tmp3.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
mob_fc_reinfect = tmp3.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
GDP_2021_reinfect = 100*mob_2021_reinfect*df_yratio.loc[c]['ym_ratio']
GDP_fc_reinfect = 100*mob_fc_reinfect*df_yratio.loc[c]['ym_ratio']
dD_2021_reinfect = tmp3.df3['DD'][-1] - tmp3.df3['DD'][cnum]
dD_fc_reinfect = tmp3.df3['DD'][-1] - tmp3.df3['DD'][dnum]
dD_mn_2021_reinfect = 1000000*dD_2021_reinfect/tmp.N
dD_mn_fc_reinfect = 1000000*dD_fc_reinfect/tmp.N
# mob_2021_better = tmp4.df3['mob_fc'].iloc[cnum:].mean() # Average mobility for 2021
# mob_fc_better = tmp4.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
# GDP_2021_better = 100*mob_2021_better*df_yratio.loc[c]['ym_ratio']
# GDP_fc_better = 100*mob_fc_better*df_yratio.loc[c]['ym_ratio']
# dD_2021_better = tmp4.df3['DD'][-1] - tmp4.df3['DD'][cnum]
# dD_fc_better = tmp4.df3['DD'][-1] - tmp4.df3['DD'][dnum]
# dD_mn_2021_better = 1000000*dD_2021_better/tmp.N
# dD_mn_fc_better = 1000000*dD_fc_better/tmp.N
data[c] = [mob_2021,mob_fc,
GDP_2021,GDP_fc,
dD_2021,dD_fc,
dD_mn_2021,dD_mn_fc,
mob_2021_shock, mob_fc_shock,
GDP_2021_shock, GDP_fc_shock,
dD_2021_shock, dD_fc_shock,
dD_mn_2021_shock, dD_mn_fc_shock,
mob_2021_vacworse, mob_fc_vacworse,
GDP_2021_vacworse, GDP_fc_vacworse,
dD_2021_vacworse, dD_fc_vacworse,
dD_mn_2021_vacworse, dD_mn_fc_vacworse,
mob_2021_reinfect, mob_fc_reinfect,
GDP_2021_reinfect, GDP_fc_reinfect,
dD_2021_reinfect, dD_fc_reinfect,
dD_mn_2021_reinfect, dD_mn_fc_reinfect,
# mob_2021_better, mob_fc_better,
# GDP_2021_better, GDP_fc_better,
# dD_2021_better, dD_fc_better,
# dD_mn_2021_better, dD_mn_fc_better,
]
df_out = pd.DataFrame.from_dict(data, orient='index', columns=data_col)
name = f'../output/{out_save_folder}/all_output.pkl'
pickle.dump(df_out,open(name,'wb'))
with pd.ExcelWriter(f'../output/{out_save_folder}/output_condensed.xlsx') as writer:
df_out.to_excel(writer, sheet_name='output')
return df_out
def update_table(cset=['US','DE']):
data_col = ['Mobility 2021','Mobility, now to mid 2022',
'Deaths/mn 2021','Deaths/mn, now to mid 2022',
]
data = {}
for c in cset:
tmp = pickle.load(open(f'../output/{out_load_folder}/{c}_baseline.pkl','rb'))
cnum = tmp.df3.index.get_loc('2021-01-31')
cnum2 = tmp.df3.index.get_loc('2021-12-31')
d = tmp.df3['total_cases'].last_valid_index()
dnum = tmp.df3.index.get_loc(d)+1
mob_2021 = tmp.df3['mob_fc'].iloc[cnum:cnum2].mean() # Average mobility for 2021
mob_fc = tmp.df3['mob_fc'].iloc[dnum:].mean() # Average mobility from current date till year end
dD_2021 = tmp.df3['DD'][cnum2] - tmp.df3['DD'][cnum]
dD_fc = tmp.df3['DD'][-1] - tmp.df3['DD'][dnum]
dD_mn_2021 = 1000000*dD_2021/tmp.N
dD_mn_fc = 1000000*dD_fc/tmp.N
data[c] = [100*mob_2021,100*mob_fc,
dD_mn_2021,dD_mn_fc,
]
    df_out = pd.DataFrame.from_dict(data, orient='index', columns=data_col)
# CPTAC Images Join
import pandas as pd
import numpy as np
imglist = | pd.read_csv('../CPTAC-LUAD-HEslide-filename-mapping_Jan2019.csv', header=0) | pandas.read_csv |
import streamlit as st
import pandas as pd
import numpy as np
import sklearn.neighbors
import pydeck as pdk
import seaborn as sns
from util import config
from util import mapping
from util import trip_data
@st.cache(suppress_st_warning=True)
def load_data():
st.write('Loading data...')
trips = pd.read_feather(config.PROCESSED_DATA_PATH + 'trips_scaled.feather')
trips.set_index('rte_id', inplace=True)
gridpts_at_rte_500 = pd.read_feather(config.PROCESSED_DATA_PATH + 'gridpts_at_rte_500.feather')
gridpts_at_rte_500.set_index('rte_id', inplace=True)
    grid_pts_500 = pd.read_feather(config.MODEL_PATH + 'grid_points_500.feather')
"""
Utilities
"""
from microbepy.common import combination_iterator
from microbepy.common import config
from microbepy.common import constants as cn
from microbepy.common.equivalence_class import EquivalenceClass
from microbepy.common import schema
import collections
import math
import matplotlib.cm as cm
import numpy as np
import os
import pandas as pd
from sklearn import linear_model
import sqlite3 as sql
import sys
import types
DATA_DIR = "Data"
ALT_DATA_DIR = 'data_base' # Directory if project is installed
REFERENCE_DIR = "reference"
DATA_MODEL_DIR = "data_model"
DATA_DIRECTORIES = set([DATA_DIR, ALT_DATA_DIR])
GIT_DIR = ".git"
PYTHON_SUBDIRECTORIES = [
"statistics", "model", "correlation",
"data", "plot", "search", "common",
]
SPECIES = [cn.SPECIES_MIX_DVH, cn.SPECIES_MIX_MMP]
TOLERANCE = 0.001
GENE_PREFIXES = ['MMP', 'DVU']
SMALL_DETERMINANT = 1e-6
TABLES_GENOTYPE_PHENOTYPE = [
cn.TABLE_CULTURE, cn.TABLE_MUTATION, cn.TABLE_ISOLATE,
cn.TABLE_CULTURE_ISOLATE_LINK, cn.TABLE_ISOLATE_MUTATION_LINK,
cn.TABLE_GENE_DESCRIPTION,
]
Venn = collections.namedtuple('Venn', ['both', 'only1', 'only2'])
def isNumber(obj):
try:
_ = float(obj)
return True
except:
return False
def isStr(obj):
"""
:return bool: True if string or unicode
"""
type_string = str(type(obj))
if 'str' in type_string or 'unicode' in type_string:
return True
else:
return False
def toNumber(obj):
"""
Attempts to convert object to a number.
First an int, then a float.
If all fail, returns np.nan.
"""
for typ in [int, float]:
try:
val = typ(obj)
return val
except:
pass
return np.nan
def isStr(obj):
"""
:return bool: True if string or bytes
"""
type_string = str(type(obj))
if 'str' in type_string or 'bytes' in type_string:
return True
else:
return False
def isFloatsEqual(fp1, fp2, tolerance=TOLERANCE):
"""
Tests for approximate equality between two floating points.
:param float fp1:
:param float fp2:
:param float tolerance: a positive number less than 1
:return bool:
"""
if fp2 == 0:
if fp1 == 0:
return True
else:
return False
  return abs(fp1 - fp2)/abs(fp2) < tolerance
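# Added illustrative sketch (not part of the original module): the comparison is
# relative to fp2, so the tolerance scales with the second argument. Call this
# manually; it never runs on import.
def _demoIsFloatsEqual():
  assert isFloatsEqual(100.0, 100.00001)  # relative error ~1e-7 < TOLERANCE
  assert not isFloatsEqual(99.0, 100.0)   # relative error 1e-2 > TOLERANCE
  assert isFloatsEqual(0.0, 0.0)          # zero is special-cased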
def getPath(path_initial, path_last):
"""
Constructs a path from the initial segment and the
  terminal element (path_last).
  :param list-of-str path_initial:
  :param str/None path_last:
:return str:
"""
def makeItem(item):
"""
Constructs a list containing either just the single item
(if it's non-None) or an empty list.
:param object item:
:return list:
"""
if item is None:
return []
else:
return [item]
path_elements = list(path_initial)
addendum = makeItem(path_last)
path_elements.extend(addendum)
#
path = path_elements[0]
if len(path_elements) > 1:
for ele in path_elements[1:]:
path = os.path.join(path, ele)
return path
def getDataModelPath(filename):
"""
:return str path: path to data model.
:param str filename or None: name of file in sequence data
if None, returns the path to the directory
"""
if cn.IS_TEST:
return cn.TEST_PATH
else:
try:
path = getIdentifiedDirectory(key_directory=GIT_DIR)
result = getPath([path, DATA_DIR, DATA_MODEL_DIR],
filename)
except ValueError:
path = getIdentifiedDirectory(key_directory=ALT_DATA_DIR)
result = getPath([path, ALT_DATA_DIR], filename)
return result
def getReferenceDataPath(filename):
"""
:return str path: path to processed data
:param str filename or None: name of file in sequence data
if None, returns the path to the directory
"""
root_directory = getIdentifiedDirectory()
return getPath([root_directory, DATA_DIR, REFERENCE_DIR],
filename)
def getSequenceDataPath(filename):
"""
Provides a path to the sequence data.
:param str filename or None: name of file
if None, returns the path to the directory
"""
return getPath([getRootDataDirectory(),
"sequence_data"], filename)
def getRateYieldDataPath(filename):
"""
Provides a path to the rate-yield data.
:param str filename or None: name of file
if None, returns the path to the directory
"""
return getPath([getRootDataDirectory(),
"growth_data", "rate_yield"], filename)
def getODTimeseriesDataPath(filename):
"""
Provides a path to the rate-yield data.
:param str filename or None: name of file
if None, returns the path to the directory
"""
return getPath([getRootDataDirectory(),
"growth_data", "OD_timeseries"], filename)
def getGeneNamesFromList(columns):
"""
:param list-of-str columns: list of names, some of which are genes
:return list-of-str:
"""
names = []
for pfx in GENE_PREFIXES:
pfx_len = len(pfx)
for ele in columns:
if not isNull(ele):
if ele[0:pfx_len] == pfx:
names.append(ele)
return names
def isStr(v):
if isinstance(v, str):
return True
if isinstance(v, bytes):
return True
def messageConsole(msg):
print("*** %s" % msg)
def isNan(v):
if isinstance(v, float):
return np.isnan(v)
else:
return False
def isNull(v):
if isNan(v):
return True
singleton_types = [str, int, bytes, type(None)]
if any([isinstance(v, t) for t in singleton_types]):
if v is None:
return True
if (v == "None") or (v == "none") or (v == "nan"):
return True
return False
def isNanInDataFrame(df, nan_columns=None):
"""
:param pd.DataFrame df:
:return bool, list-of-str: list of nan columns
"""
if nan_columns is None:
nan_columns = []
columns = []
for col in df.columns.tolist():
if col in nan_columns:
continue
if any([isNan(x) for x in df[col].tolist()]):
columns.append(col)
if len(columns) == 0:
return False, columns
else:
return True, columns
def replaceNan(df, columns=None, value=0):
"""
Replaces nan values in specified columns of a dataframe.
  :param pd.DataFrame df:
:param list-of-str columns: columns to be transformed
:param object value:
"""
if columns is None:
columns = df.columns.tolist()
for column in columns:
new_values = [value if isNan(x) else x for x in df[column]]
df[column] = new_values
def getColorMap(num_colors=20):
"""
:return list-of-color:
"""
# generate data
N = num_colors
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
# get a colormap from matplotlib
colormap =cm.get_cmap("gist_rainbow") #choose any matplotlib colormap here
# define maximum and minimum for cmap
colorspan=[40,140]
# create a color channel with a value between 0 and 1
# outside the colorspan the value becomes 0 (left) and 1 (right)
cmap_input=np.interp(np.sqrt(x*x+y*y),colorspan,[0,1],left=0,right=1)
# use colormap to generate rgb-values
# second value is alfa (not used)
# third parameter gives int if True, otherwise float
A_color=colormap(cmap_input,1,True)
# convert to hex to fit to bokeh
bokeh_colors = ["#%02x%02x%02x" % (r, g, b) for r, g, b in A_color[:,0:3]]
return bokeh_colors
def getColumnsFromDataFrame(df, columns):
"""
:return pd.DataFrame: has the specified columns
:raises ValueError: column not present
"""
trues = [c in df.columns.tolist() for c in columns]
if not all(trues):
raise ValueError("Column not present in DataFrame")
return pd.DataFrame(df[columns])
def changeColumnValues(df, func):
"""
Change values in the columns.
:param DataFrame df:
:param Function func:
Inputs: column (str), value (float)
Output: float
"""
for col in df.columns:
values = df[col].tolist()
new_values = [func(col, v) for v in values]
df[col] = new_values
def setNoneList(values):
"""
:param list/None values:
:return list
"""
if values is None:
return []
else:
return values
def getColumnType(column):
"""
Finds the type for a column
:param str column:
:return type:
"""
for column_schema in cn.TABLE_SCHEMAS.column_schemas.getSchemas():
if column == column_schema.name:
return column_schema.data_type
raise RuntimeError("Column %s is not typed" % column)
def cleanDF(df, is_reset_index=False):
"""
Removes duplicates and stray columns.
Removes all columns of the form "index_".
:param pd.DataFrame df:
"""
df.drop_duplicates(inplace=True)
df = pruneNullRows(df)
if is_reset_index:
resetIndex(df)
remove_column = "%s_" % cn.INDEX
for column in df.columns:
if (column.find(remove_column) == 0) \
or (column == cn.INDEX):
del df[column]
return df
def resetIndex(df):
"""
Resets the index handling stray columns.
:param pd.DataFrame df:
"""
LEVEL_0 = 'level_0'
columns = df.columns
if LEVEL_0 in columns:
del df[LEVEL_0]
df.reset_index(inplace=True)
if LEVEL_0 in columns:
del df[LEVEL_0]
def standardize(df, columns=None):
"""
Standardizes a numeric column in a dataframe.
:param pd.DataFrame df:
:param list-of-str columns
"""
if columns is None:
columns = df.columns
for column in columns:
std = np.std(df[column])
avg = np.mean(df[column])
if np.isclose(std, 0):
df[column] = 0
else:
df[column] = (df[column] - avg)/std
def deleteColumns(df, columns, is_drop_duplicates=True):
"""
Deletes columns from the DataFrame, if they are present.
:param pd.DataFrame df:
:param list-of-str columns:
:param bool is_drop_duplicates:
"""
for column in columns:
if column in df.columns:
del df[column]
if is_drop_duplicates:
df.drop_duplicates(inplace=True)
def trimDF(df, keep_columns=None, delete_columns=None):
"""
Keeps/deletes columns in a dataframe.
Exactly one of keep_columns/delete_columns can be non-None.
:param pd.DataFrame df:
:param list-of-str keep_columns: Columns to keep
:param list-of-str delete_columns: Columns to delete
:return pd.DataFrame:
"""
if keep_columns is None and delete_columns is None:
raise ValueError("Invalid parameters.")
if not keep_columns is None and not delete_columns is None:
raise ValueError("Invalid parameters.")
if keep_columns is not None:
df_result = df[keep_columns].copy(deep=True)
if delete_columns is not None:
df_result = df.copy(deep=True)
deleteColumns(df_result, delete_columns, is_drop_duplicates=True)
df_result.drop_duplicates(inplace=True)
return df_result
def makeNullRow(df, null=np.nan):
"""
Creates a row of null values to the dataframe.
:param object null: value in row
:return dict: row
"""
row = {}
for column in df.columns.tolist():
row[column] = null
return row
def addNullRow(df, null=np.nan):
"""
Adds a row of null values to the dataframe.
:param object null: value in row
:return pd.DataFrame:
"""
return df.append(makeNullRow(df), ignore_index=True)
def getDBPath():
"""
The correct choice of a path depends on the runtime environment:
1. Running with .microbepy/config.yml in the home
directory (includes case of being installed)
SQLDB_PATH: <the path>
2. Running within the microbepy project:
microbepy/Data/data_model/sythetic.db
3. Running as a subproject:
<containing project>/Data/data_model/microbepy.db
:return str:
:raises KeyError: configuration file exists but no DB path
:raises ValueError: cannot find .git file for project root
"""
try:
path = config.get(key=cn.SQLDB_PATH_NAME)
except KeyError:
path = None
if path is None:
try:
gitbase_path = getIdentifiedDirectory(key_directory=GIT_DIR)
except:
raise ValueError("Cannot find project directory.")
path = getPath([gitbase_path, "Data", "data_model"],
cn.SYNTHETIC_DB)
return path
def getDBConnection(path=None):
if path is None:
path = getDBPath()
try:
conn = sql.connect(path)
except:
raise ValueError("Invalid path to DB: %s" % path)
return conn
def getDuplicates(values):
"""
:param list-of-object values:
:return list-of-object:
"""
result = list(values)
[result.remove(x) for x in set(values)]
return result
def typeDF(df, columns=None):
"""
Ensures that column values are of the correct type.
:param list-of-str columns:
:return pd.DataFrame:
"""
if columns is None:
columns = list(df.columns)
for column in columns:
typ = getColumnType(column)
values = df[column]
df[column] = [np.nan if isNull(v) else typ(v) for v in values]
return df
def removeDuplicateColumns(df):
"""
Removes columns that have a duplicate name.
:return pd.DataFrame:
"""
duplicates = getDuplicates(df.columns)
done = False
idx = 0
df_result = df.copy()
additions_dict = {}
while not done:
if idx >= len(df_result.columns):
done = True
break
column = df_result.columns[idx]
if column in duplicates:
df1 = df_result[column]
values = df1.iloc[:,1]
del df_result[column]
duplicates.remove(column)
additions_dict[column] = values
else:
idx += 1
df_add = pd.DataFrame(additions_dict)
df_result = pd.concat([df_result, df_add], axis=1, sort=True)
return df_result
def makeVenn(group1, group2):
"""
Constructs the Venn regions for two groups:
both - overlap between the groups
      only1 - only in group 1
      only2 - only in group 2
:param list-of-object group1:
:param list-of-object group2:
:return Venn:
"""
set1 = set(group1)
set2 = set(group2)
return Venn(
      both=set1.intersection(set2),
only1=set1.difference(set2),
only2=set2.difference(set1),
)
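# Added illustrative sketch (not part of the original module): the three Venn
# regions for two small gene lists. Call manually; it never runs on import.
def _demoMakeVenn():
  venn = makeVenn(["DVU0001", "DVU0002"], ["DVU0002", "MMP0001"])
  assert venn.both == {"DVU0002"}
  assert venn.only1 == {"DVU0001"}
  assert venn.only2 == {"MMP0001"}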
def selNonNull(values1, values2):
"""
  Selects, position by position, the non-null value from the two lists.
  :param list-of-object values1:
  :param list-of-object values2:
  :return list-of-object:
"""
if len(values1) != len(values2):
raise ValueError("Inputs must have the same length.")
pairs = zip(values1, values2)
result = []
for x,y in pairs:
if x == y:
result.append(x)
elif isNull(x):
result.append(y)
elif isNull(y):
result.append(x)
else:
msg = "Inputs must be identical if in the same position and non-null."
raise ValueError(msg)
return result
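# Added illustrative sketch (not part of the original module): positions either
# agree or exactly one side is null, in which case the non-null value wins.
def _demoSelNonNull():
  assert selNonNull([1, np.nan, 3], [1, 2, np.nan]) == [1, 2, 3]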
def mergeRowsColumns(df1, df2, merge_column):
"""
Merges the two dataframes, combining both rows and columns.
Column values are appended where the dataframes have the same columns.
nan values are added to columns where the column is not present
in a dataframe.
:param pd.DataFrame df1:
:param pd.DataFrame df2:
:param str merge_column: column on which the merge is done
:return pd.DataFrame: Has columns df1.columns + df2.columns
"""
LEFT = "__left"
RIGHT = "__right"
df_1 = removeDuplicateColumns(df1)
cleanDF(df_1)
df_2 = removeDuplicateColumns(df2)
cleanDF(df_2)
df_result = df_1.merge(df_2, on=merge_column, how='outer',
suffixes=(LEFT, RIGHT))
# Merge the overlapping columns
left_overlaps = [c for c in df_result.columns
if c[-len(LEFT):] == LEFT]
left_overlaps.sort()
right_overlaps = [c for c in df_result.columns
if c[-len(RIGHT):] == RIGHT]
right_overlaps.sort()
pairs = zip(left_overlaps, right_overlaps)
for left, right in pairs:
values = selNonNull(df_result[left], df_result[right])
pos = left.find(LEFT)
column = left[0:pos]
del df_result[left]
del df_result[right]
df_result[column] = values
# Finalize result
cleanDF(df_result)
return df_result
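# Added illustrative sketch (not part of the original module): the invented
# frames below share only the isolate key; the merge keeps all three isolates
# and leaves NaN where a column is absent. Call manually; never runs on import.
def _demoMergeRowsColumns():
  df1 = pd.DataFrame({cn.KEY_ISOLATE: ["i1", "i2"], "a": [1.0, 2.0]})
  df2 = pd.DataFrame({cn.KEY_ISOLATE: ["i2", "i3"], "b": [3.0, 4.0]})
  # Expect rows i1, i2, i3 with columns for the key, "a", and "b"
  return mergeRowsColumns(df1, df2, cn.KEY_ISOLATE)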
def readSQL(cmd, path=None):
"""
Creates a dataframe for the SQL query.
1. Duplicate column names (ending with ':n') are merged.
2. Deletes columns that begin with 'index'
3. None values have a consistent representation
:param str/TableSchema cmd: SQL query command or TableSchema,
if want the entire table
:param str path: path to database
:return pd.DataFrame:
"""
SEP = ":"
if isinstance(cmd, schema.TableSchema):
sql_cmd = "SELECT * FROM %s" % cmd.name
else:
sql_cmd = cmd
conn = getDBConnection(path=path)
df_result = pd.read_sql(sql_cmd, conn)
conn.close()
done = False
while not done:
duplicates = [c for c in df_result.columns if c.count(SEP) > 0]
if len(duplicates) == 0:
done = True
break
column = duplicates[0]
pos = column.find(SEP)
column = column[:pos]
columns = [c for c in df_result.columns if c.count(column) > 0]
# Delete index columns
if column in ['level_0', cn.INDEX]:
for col in columns:
del df_result[col]
# Merge other columns
else:
col_first = columns[0]
values = df_result[col_first].tolist()
del df_result[col_first]
columns = columns[1:]
for col in columns:
try:
values = selNonNull(values, df_result[col].tolist())
except:
import pdb; pdb.set_trace()
del df_result[col]
df_result[column] = values
df_result = unifyNullValues(df_result)
return df_result
def unifyNullValues(df_value):
"""
Makes all null df_value cn.NONE.
:param pd.DataFrame df_value:
:return pd.DataFrame:
"""
return df_value.fillna(value=np.nan)
#return df_value.applymap(lambda v: cn.NONE if isNull(v) else v)
def pruneNullRows(df):
"""
Removes rows that are all nulls.
  :param pd.DataFrame df:
  :return pd.DataFrame: a new DataFrame without the all-null rows
"""
return df.dropna(axis=0, how='all')
def pruneRowsWithNullColumns(df, columns):
"""
Deletes rows where there is a null value in any of a list
of columns.
:param pd.DataFrame df:
  :param list-of-str columns:
:return pd.DataFrame:
"""
def check(row):
return not any([isNull(row[c]) for c in columns])
#
sel = df.apply(check, axis=1)
return pd.DataFrame(df.loc[sel])
def coerceDF(df):
"""
Coerces the columns to their type if the type is known.
:param pd.DataFrame df:
:return pd.DataFrame:
"""
df_result = df.copy(deep=True)
for column in df_result.columns:
try:
schema = cn.TABLE_SCHEMAS.column_schemas.getSchema(column)
if schema.data_type in [float, int, bool]:
df_result[column] = pd.to_numeric(df_result[column])
# Get an exception if the column type is unknown
except ValueError:
pass
return df_result
def appendUnique(values, value):
"""
Adds value to values if it is not already present.
:param list-of-object values:
:param object value:
:return list-of-object:
"""
new_values = list(values)
if not value in new_values:
new_values.append(value)
return new_values
def mergeLeft(df1, df2, merge_column):
"""
Does a left join properly handling null values (which pandas does not do).
:param pd.DataFrame df1: left dataframe
:param pd.DataFrame df2:
:param str merge_column: column on which the merge is done
:return pd.DataFrame:
Assumes that the only overlap in column names is the merge_column
"""
overlaps = set(df1.columns).intersection(df2.columns)
if overlaps != set([merge_column]):
raise ValueError ("Must only have the merge column in common!")
# Find the null values on the left
sel = [not x for x in df1[merge_column].isnull()]
df_left = df1[sel]
# Merge non-null values
df_result = df_left.merge(df2, on=merge_column, how='left')
# Include the omitted rows as null columns
sel = [x for x in df1[merge_column].isnull()]
df_null = df1[sel].copy()
for column in df2.columns:
if column != merge_column:
df_null[column] = cn.NONE
# Add the missing rows
df_result = pd.concat([df_result, df_null], sort=True)
# Finalize result
cleanDF(df_result)
return df_result
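# Added illustrative sketch (not part of the original module): unlike a plain
# pandas left join, the row whose key is null is kept, with the right-hand
# columns set to cn.NONE. The frames are invented; call manually.
def _demoMergeLeft():
  df_left = pd.DataFrame({cn.KEY_MUTATION: ["m1", None], "val": [1, 2]})
  df_right = pd.DataFrame({cn.KEY_MUTATION: ["m1"], "gene": ["DVU0001"]})
  return mergeLeft(df_left, df_right, cn.KEY_MUTATION)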
def unpivot(df, label_column='label', value_column='value'):
"""
Converts a pivoted dataframe into one with a column
of labels.
:param pd.DataFrame: DF with values in cells and value
labels in columns
:param str label_column: name of the label column on output
:param str value_column: name of the value column on output
:return pd.DataFrame: label_column, value_column
"""
return pd.melt(df, var_name=label_column, value_vars=df.columns,
value_name=value_column)
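# Added illustrative sketch (not part of the original module): a 2x2 pivoted
# frame melts into four (label, value) rows. Call manually; never runs on import.
def _demoUnpivot():
  df_wide = pd.DataFrame({"m1": [0, 1], "m2": [1, 1]})
  return unpivot(df_wide, label_column=cn.KEY_MUTATION, value_column=cn.COUNT)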
def makeMatrix(df,
row_name=cn.KEY_ISOLATE,
column_name=cn.KEY_MUTATION,
value_name=cn.COUNT,
default_value=0.0,
):
"""
  Creates a pivoted data matrix from a DataFrame that consists of a column of
  row names, a column of column names, and a column of values.
:param pd.DataFrame df:
:param str row_name: name of the column in df used for row values
:param str column_name: name of column whose values are used
as column names in the matrix
:param str value_name: name of the column from which values
are obtained.
:param float default_value: used for missing values
"""
df_sub = df[[row_name, column_name, value_name]].copy()
df_sub.drop_duplicates(inplace=True)
sel = df_sub.apply(
lambda r: (not isNull(r[column_name]))
and (not isNull(r[row_name])),
axis=1
)
df_sub = df_sub.loc[sel]
df_result = df_sub.pivot_table(index=row_name, columns=column_name, values=value_name)
  df_result = df_result.applymap(lambda x: default_value if isNull(x) else x)
return df_result
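# Added illustrative sketch (not part of the original module): three
# (isolate, mutation, count) rows pivot into a 2x2 matrix and the missing
# cell is filled with default_value. Call manually; never runs on import.
def _demoMakeMatrix():
  df = pd.DataFrame({
      cn.KEY_ISOLATE: ["i1", "i1", "i2"],
      cn.KEY_MUTATION: ["m1", "m2", "m1"],
      cn.COUNT: [1.0, 1.0, 1.0],
  })
  return makeMatrix(df)  # rows i1, i2; columns m1, m2; (i2, m2) becomes 0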
def set2list(aSet):
return [x for x in aSet]
def removeDuplicatesFromList(values):
"""
Trims the list to remove duplicates. List elements
may be a string or a list of strings or int.
Order is not preserved.
:param list-of-str or list-of-list-of-other values:
:return list-of-inputtype:
"""
if isStr(values[0]) or isNumber(values[0]):
new_values = set(values)
return set2list(new_values)
else:
df = | pd.DataFrame(values) | pandas.DataFrame |
import matplotlib
#matplotlib.use('agg')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cmocean
#####
# The following section is a little brittle due to hardcoded names, but we'll fix
# that later. Code copy pasted from jupyter notebook.
#####
def all_plots(df):
'''Create and output all plots'''
btl_t1_residuals_pressure_plot(df)
btl_t2_residuals_pressure_plot(df)
t1_t2_residuals_pressure_plot(df)
btl_t1_residuals_station_plot(df)
btl_t2_residuals_station_plot(df)
t1_t2_residuals_station_plot(df)
btl_t1_residuals_station_deep_plot(df)
btl_t2_residuals_station_deep_plot(df)
t1_t2_residuals_station_deep_plot(df)
btl_c1_residuals_pressure_plot(df)
btl_c2_residuals_pressure_plot(df)
c1_c2_residuals_pressure_plot(df)
btl_c1_residuals_station_plot(df)
btl_c2_residuals_station_plot(df)
c1_c2_residuals_station_plot(df)
btl_c1_residuals_station_deep_plot(df)
btl_c2_residuals_station_deep_plot(df)
c1_c2_residuals_station_deep_plot(df)
c_t_coherence_plot(df)
btl_c1_residuals_compare_plot(df)
btl_c2_residuals_compare_plot(df)
c1_c2_residuals_compare_plot(df)
btl_c1_residuals_station_uncorrected_plot(df)
btl_c2_residuals_station_uncorrected_plot(df)
c1_c2_residuals_station_uncorrected_plot(df)
btl_sal_pressure_plot(df)
btl_sal_station_plot(df)
btl_sal_station_deep_plot(df)
btl_oxy_residuals_pressure_plot(df)
btl_oxy_residuals_station_plot(df)
btl_oxy_residuals_station_deep_plot(df)
btl_oxy_residuals_temperature_plot(df)
btl_oxy_residuals_station_temperature_plot(df)
btl_oxy_residuals_station_deep_temperature_plot(df)
btl_oxy_residuals_pressure_concentration_plot(df)
btl_oxy_residuals_station_concentration_plot(df)
return None
#################################################################
##### Here lies the temperature plots, long may they rest. #####
#################################################################
def btl_t1_residuals_pressure_plot(reft_vals, t1_vals, press, stnno):
reft_t1 = reft_vals - t1_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(reft_t1, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('REFTMP-CTDTMP1 vs CTDPRS')
ax.set_xlabel('T1 Residual (T90 C)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/reftmp_t1_p.svg', format='svg')
fig.savefig('./data/images/reftmp_t1_p.pdf', format='pdf')
plt.close()
return None
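# Added illustrative sketch (not part of the cruise processing): the individual
# plot helpers take aligned 1-D arrays rather than the DataFrame expected by
# all_plots(). The synthetic values below are invented and ./data/images/ must
# already exist for savefig to succeed. Call manually; never runs on import.
def _demo_btl_t1_residuals_pressure_plot(n=50):
    press = np.linspace(0, 5000, n)
    reft = 20.0 * np.exp(-press / 1000.0)
    t1 = reft + np.random.normal(0.0, 0.002, n)
    stnno = np.repeat(np.arange(1, 6), n // 5)
    btl_t1_residuals_pressure_plot(reft, t1, press, stnno)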
def btl_t2_residuals_pressure_plot(reft_vals, t2_vals, press, stnno):
reft_t2 = reft_vals - t2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(reft_t2, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('REFTMP-CTDTMP2 vs CTDPRS')
ax.set_xlabel('T2 Residual (T90 C)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/reftmp_t2_p.svg', format='svg')
fig.savefig('./data/images/reftmp_t2_p.pdf', format='pdf')
plt.close()
return None
def t1_t2_residuals_pressure_plot(t1_vals, t2_vals, press, stnno):
t1_t2 = t1_vals - t2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(t1_t2, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('CTDTMP1-CTDTMP2 vs CTDPRS')
ax.set_xlabel('T1-T2 Residual (T90 C)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/t1_t2_p.svg', format='svg')
fig.savefig('./data/images/t1_t2_p.pdf', format='pdf')
plt.close()
return None
def btl_t1_residuals_station_plot(reft_vals, t1_vals, press, stnno):
reft_t1 = reft_vals - t1_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, reft_t1, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('REFTMP-CTDTMP1 vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('T1 Residual (T90 C)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/reftmp_t1_stn.svg', format='svg')
fig.savefig('./data/images/reftmp_t1_stn.pdf', format='pdf')
plt.close()
return None
def btl_t2_residuals_station_plot(reft_vals, t2_vals, press, stnno):
    reft_t2 = reft_vals - t2_vals
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    cm = ax.scatter(stnno, reft_t2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('REFTMP-CTDTMP2 vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('T2 Residual (T90 C)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/reftmp_t2_stn.svg', format='svg')
fig.savefig('./data/images/reftmp_t2_stn.pdf', format='pdf')
plt.close()
return None
def t1_t2_residuals_station_plot(t1_vals, t2_vals, press, stnno):
t1_t2 = t1_vals - t2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, t1_t2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('CTDTMP1-CTDTMP2 vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('T1-T2 Residual (T90 C)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/t1_t2_stn.svg', format='svg')
fig.savefig('./data/images/t1_t2_stn.pdf', format='pdf')
plt.close()
return None
def btl_t1_residuals_station_deep_plot(reft_vals, t1_vals, press, stnno):
df = pd.DataFrame()
df['CTDPRS'] = press
df['REFT_T1'] = reft_vals - t1_vals
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['REFT_T1'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('REFTMP-CTDTMP1 (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('T1 Residual (T90 C)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/reftmp_t1_stn_deep.svg', format='svg')
fig.savefig('./data/images/reftmp_t1_stn_deep.pdf', format='pdf')
plt.close()
return None
def btl_t2_residuals_station_deep_plot(reft_vals, t2_vals, press, stnno):
df = pd.DataFrame()
df['CTDPRS'] = press
df['REFT_T2'] = reft_vals - t2_vals
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['REFT_T2'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('REFTMP-CTDTMP2 (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('T2 Residual (T90 C)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/reftmp_t2_stn_deep.svg', format='svg')
fig.savefig('./data/images/reftmp_t2_stn_deep.pdf', format='pdf')
plt.close()
return None
def t1_t2_residuals_station_deep_plot(t1_vals, t2_vals, press, stnno):
df = pd.DataFrame()
df['CTDPRS'] = press
df['T1_T2'] = t1_vals - t2_vals
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['T1_T2'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('CTDTMP1-CTDTMP2 (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('T1-T2 Residual (T90 C)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/t1_t2_stn_deep.svg', format='svg')
fig.savefig('./data/images/t1_t2_stn_deep.pdf', format='pdf')
plt.close()
return None
#################################################################
##### Here lies the conductivity plots, long may they rest. #####
#################################################################
def btl_c1_residuals_pressure_plot(refc_vals, c1_vals, press, stnno):
refc_c1 = refc_vals - c1_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(refc_c1, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('BTLCOND-CTDCOND1 vs CTDPRS')
ax.set_xlabel('C1 Residual (mS/cm)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/btlcond_c1_p.svg', format='svg')
fig.savefig('./data/images/btlcond_c1_p.pdf', format='pdf')
plt.close()
return None
def btl_c2_residuals_pressure_plot(refc_vals, c2_vals, press, stnno):
refc_c2 = refc_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(refc_c2, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_title('BTLCOND-CTDCOND2 vs CTDPRS')
ax.set_xlabel('C2 Residual (mS/cm)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/btlcond_c2_p.svg', format='svg')
fig.savefig('./data/images/btlcond_c2_p.pdf', format='pdf')
plt.close()
return None
def c1_c2_residuals_pressure_plot(c1_vals, c2_vals, press, stnno):
c1_c2 = c1_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(c1_c2, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('CTDCOND1-CTDCOND2 vs CTDPRS')
ax.set_xlabel('C1-C2 Residual (mS/cm)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/c1_c2_p.svg', format='svg')
fig.savefig('./data/images/c1_c2_p.pdf', format='pdf')
plt.close()
return None
def btl_c1_residuals_station_plot(refc_vals, c1_vals, press, stnno):
refc_c1 = refc_vals - c1_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, refc_c1, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND-CTDCOND1 vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C1 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c1_stn.svg', format='svg')
fig.savefig('./data/images/btlcond_c1_stn.pdf', format='pdf')
plt.close()
return None
def btl_c2_residuals_station_plot(refc_vals, c2_vals, press, stnno):
refc_c2 = refc_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, refc_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND-CTDCOND2 vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c2_stn.svg', format='svg')
fig.savefig('./data/images/btlcond_c2_stn.pdf', format='pdf')
plt.close()
return None
def c1_c2_residuals_station_plot(c1_vals, c2_vals, press, stnno):
c1_c2 = c1_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, c1_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('CTDCOND1-CTDCOND2 vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C1-C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/c1_c2_stn.svg', format='svg')
fig.savefig('./data/images/c1_c2_stn.pdf', format='pdf')
plt.close()
return None
def btl_c1_residuals_station_deep_plot(refc_vals, c1_vals, press, stnno):
df = pd.DataFrame()
df['CTDPRS'] = press
df['REFT_C1'] = refc_vals - c1_vals
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['REFT_C1'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND-CTDCOND1 (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C1 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c1_stn_deep.svg', format='svg')
fig.savefig('./data/images/btlcond_c1_stn_deep.pdf', format='pdf')
plt.close()
return None
def btl_c2_residuals_station_deep_plot(refc_vals, c2_vals, press, stnno):
df = pd.DataFrame()
df['CTDPRS'] = press
df['REFT_C2'] = refc_vals - c2_vals
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['REFT_C2'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND-CTDCOND2 (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c2_stn_deep.svg', format='svg')
fig.savefig('./data/images/btlcond_c2_stn_deep.pdf', format='pdf')
plt.close()
return None
def c1_c2_residuals_station_deep_plot(c1_vals, c2_vals, press, stnno):
df = pd.DataFrame()
df['CTDPRS'] = press
df['C1_C2'] = c1_vals - c2_vals
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['C1_C2'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('CTDCOND1-CTDCOND2 (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C1-C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/c1_c2_stn_deep.svg', format='svg')
fig.savefig('./data/images/c1_c2_stn_deep.pdf', format='pdf')
plt.close()
return None
def c_t_coherence_plot(t1_vals, t2_vals, c1_vals, c2_vals, press):
t1_t2 = t1_vals - t2_vals
c1_c2 = c1_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(t1_t2, c1_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('T1-T2 vs C1-C2')
ax.set_xlabel('T1-T2 Residual (T90 C)')
ax.set_ylabel('C1-C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/c_t_coherence_p.svg', format='svg')
fig.savefig('./data/images/c_t_coherence_p.pdf', format='pdf')
plt.close()
return None
def btl_c1_residuals_compare_plot(refc_vals, c1_vals, press):
refc_c1 = refc_vals - c1_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(refc_vals, refc_c1, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND vs BTLCOND-CTDCOND1')
ax.set_xlabel('Reference Conductivity (mS/cm)')
ax.set_ylabel('C1 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c1_compare.svg', format='svg')
fig.savefig('./data/images/btlcond_c1_compare.pdf', format='pdf')
plt.close()
return None
def btl_c2_residuals_compare_plot(refc_vals, c2_vals, press):
refc_c2 = refc_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(refc_vals, refc_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND vs BTLCOND-CTDCOND2')
ax.set_xlabel('Reference Conductivity (mS/cm)')
ax.set_ylabel('C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c2_compare.svg', format='svg')
fig.savefig('./data/images/btlcond_c2_compare.pdf', format='pdf')
plt.close()
return None
def c1_c2_residuals_compare_plot(refc_vals, c1_vals, c2_vals, press):
c1_c2 = c1_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(refc_vals, c1_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND vs CTDCOND1-CTDCOND2')
ax.set_xlabel('Reference Conductivity (mS/cm)')
ax.set_ylabel('C1-C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/c1_c2_compare.svg', format='svg')
fig.savefig('./data/images/c1_c2_compare.pdf', format='pdf')
plt.close()
return None
def btl_c1_residuals_station_uncorrected_plot(refc_vals, c1_vals, press, stnno):
refc_c1 = refc_vals - c1_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, refc_c1, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND-CTDCOND1 (Uncorrected) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C1 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c1_stn_uncorrected.svg', format='svg')
fig.savefig('./data/images/btlcond_c1_stn_uncorrected.pdf', format='pdf')
plt.close()
return None
def btl_c2_residuals_station_uncorrected_plot(refc_vals, c2_vals, press, stnno):
refc_c2 = refc_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, refc_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('BTLCOND-CTDCOND2 (Uncorrected) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlcond_c2_stn_uncorrected.svg', format='svg')
fig.savefig('./data/images/btlcond_c2_stn_uncorrected.pdf', format='pdf')
plt.close()
return None
def c1_c2_residuals_station_uncorrected_plot(c1_vals, c2_vals, press, stnno):
c1_c2 = c1_vals - c2_vals
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, c1_c2, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('CTDCOND1-CTDCOND2 (Uncorrected) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('C1-C2 Residual (mS/cm)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/c1_c2_stn_uncorrected.svg', format='svg')
fig.savefig('./data/images/c1_c2_stn_uncorrected.pdf', format='pdf')
plt.close()
return None
def btl_sal_pressure_plot(btl_sal, ctd_sal, press, stnno):
sal_res = btl_sal - ctd_sal
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(sal_res,-press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-0.02,0.02)
ax.set_title('SALNTY-CTDSAL vs CTDPRS')
ax.set_xlabel('CTDSAL Residual (mPSU)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/btlsal_sal_p.svg', format='svg')
fig.savefig('./data/images/btlsal_sal_p.pdf', format='pdf')
plt.close()
return None
def btl_sal_station_plot(btl_sal, ctd_sal, press, stnno):
sal_res = btl_sal - ctd_sal
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno, sal_res, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('SALNTY-CTDSAL vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('CTDSAL Residual (mPSU)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlsal_sal_stn.svg', format='svg')
fig.savefig('./data/images/btlsal_sal_stn.pdf', format='pdf')
plt.close()
return None
def btl_sal_station_deep_plot(btl_sal, ctd_sal, press, stnno):
sal_res = btl_sal - ctd_sal
df = pd.DataFrame()
df['CTDPRS'] = press
df['BTL_SAL'] = sal_res
df['STNNBR'] = stnno
df_deep = df[df['CTDPRS'] > 2000]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(df_deep['STNNBR'], df_deep['BTL_SAL'], marker='+', c=df_deep['CTDPRS'], cmap=plt.cm.viridis_r)
ax.set_ylim(-0.01,0.01)
ax.set_title('SALNTY-CTDSAL (>2000 db) vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('CTDSAL Residual (mPSU)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btlsal_sal_stn_deep.svg', format='svg')
fig.savefig('./data/images/btlsal_sal_stn_deep.pdf', format='pdf')
plt.close()
return None
#################################################################
######## Here lies the oxygen plots, long may they rest. ########
#################################################################
def btl_oxy_residuals_pressure_plot(ref_oxy, ctdoxy, press, stnno):
btl_o = ref_oxy - ctdoxy
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(btl_o, -press, marker='+', c=stnno, cmap=plt.cm.tab20c_r)
ax.set_xlim(-10,10)
ax.set_title('OXYGEN-CTDOXY vs CTDPRS')
ax.set_xlabel('CTDOXY Residual (umol/kg)')
ax.set_ylabel('Pressure (dbar)')
cbar = fig.colorbar(cm)
cbar.set_label('Station Number')
fig.savefig('./data/images/btl_oxy_p.svg', format='svg')
fig.savefig('./data/images/btl_oxy_p.pdf', format='pdf')
plt.close()
return None
def btl_oxy_residuals_station_plot(ref_oxy, ctdoxy, press, stnno):
btl_o = ref_oxy - ctdoxy
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
cm = ax.scatter(stnno,btl_o, marker='+', c=press, cmap=plt.cm.viridis_r)
ax.set_ylim(-10,10)
ax.set_title('OXYGEN-CTDOXY vs STNNBR')
ax.set_xlabel('Station Number')
ax.set_ylabel('CTDOXY Residual (umol/kg)')
cbar = fig.colorbar(cm)
cbar.set_label('Pressure (dbar)')
fig.savefig('./data/images/btl_oxy_stn.svg', format='svg')
fig.savefig('./data/images/btl_oxy_stn.pdf', format='pdf')
plt.close()
return None
def btl_oxy_residuals_station_deep_plot(ref_oxy, ctdoxy, press, stnno):
df = | pd.DataFrame() | pandas.DataFrame |
"""
Obtains category distributions for included and excluded patients.
"""
from click import *
from logging import *
import pandas as pd
@command()
@option("--all-input", required=True, help="the CSV file to read all diagnoses from")
@option(
"--included-input",
required=True,
help="the CSV file to read diagnoses for included patients from",
)
@option("--output", required=True, help="the CSV file to write counts to")
def main(all_input, included_input, output):
basicConfig(level=DEBUG)
# Load data.
info("Loading all diagnoses")
X_all = | pd.read_csv(all_input, index_col="subject_id") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon May 21 21:08:09 2018
@author: <NAME>
"""
# System Utilities
import os
import io
import sys
import gc
import traceback
# Email and Text processing
import email
from email.header import decode_header
import re
import uuid # unique ID
# Data handling and analytics tools
import pandas as pd
import numpy as np
from timeit import default_timer as timer
def FileObject2String(FileBytes, FileName='PDF'):
print(FileName)
Text = ''
return Text
###############################################################################
#Data Cleanup
def RemoveTagsFormats(Text):
if Text!='':
# apply rules in given order!
rules = [
{ r'[\x20][\x20]+' : u' '}, # Remove Consecutive spaces
{ r'\s*<br\s*/?>\s*' : u'\n'}, # Convert <br> to Newline
{ r'</(p|h\d)\s*>\s*' : u'\n\n'}, # Add double newline after </p>, </div> and <h1/>
{ r'<head>.*<\s*(/head|body)[^>]*>' : u'' }, # Remove everything from <head> to </head>
            { r'<script>.*<\s*/script[^>]*>' : u'' },     # Remove everything from <script> to </script> (javascript)
            { r'<style>.*<\s*/style[^>]*>' : u'' },       # Remove everything from <style> to </style> (stylesheet)
{ r'<[^<]*?/?>' : u'' }, # remove remaining tags
]
for rule in rules:
for (pattern,sub) in rule.items():
Text = re.sub(pattern, sub, Text)
#https://www.w3schools.com/charsets/ref_html_entities_4.asp
#https://docs.python.org/3/library/html.entities.html#html.entities.html5
Entity={
'<' :'<',
'>' :'>',
' ' :' ',
'& nbsp;' :' ',
'&n bsp;' :' ',
'&nb sp;' :' ',
'&nbs p;' :' ',
'"' :'"',
            '¢' :u'\xA2',
'£' :u'\xA3',
'©' :u'\xA9',
'®' :u'\xAE',
'±':u'\xB1',
'¼':u'\xBC',
'½':u'\xBD',
'¾':u'\xBE',
'×' :u'\xD7',
            '′' :u'\u2032',
            '″' :u'\u2033',
            '∗':u'\u2217',
            '≠' :u'\u2260',
            '™' :u'\u2122',
            '–' :u'\u2013',
            '’' :u'\u2019',
'&' :'&',
}
for (pattern,sub) in Entity.items():
Text = Text.replace(pattern, sub)
#Text=re.sub('[\x20][\x20]+' , ' ', Text)
#Text=re.sub('[\n][\n]+' , '\n', Text)
#Text=re.sub('[\t][\t]+' , '\t', Text)
Text=re.sub('[\r][\r]+' , '\r', Text)
Text=re.sub('[\n][\s]*[\n]+' , '\n', Text)
Text=re.sub('^[\s]+' , '', Text)
Text=re.sub('[\n][\s]+$' , '', Text)
else:
pass
return Text
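# Added illustrative sketch (not part of the original parser): tags are stripped
# and HTML entities decoded. Call manually; it never runs on import.
def _DemoRemoveTagsFormats():
    assert RemoveTagsFormats('<b>Hello</b> & <i>world</i>') == 'Hello & world'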
###############################################################################
#RegEx
EMAIL_FORMAT = re.compile(r'([\w\.-]+@[\w\.-]+\.\w+)')
NAME_EMAIL_FORMAT = re.compile(r'\W*(.*?)\W*([\w\.-]+@[\w\.-]+\.\w+)')
DOMAIN_FORMAT = re.compile(r'@([\w\.-]+)')
MESSAGID_FORMAT = re.compile(r'<(.*?)>')
REFERENCES_FORMAT = re.compile(r'[,<](.*?)[,>]') #This need to be revised. in the case of '<ABC>,<DEF>' if will return '<DEF'
###############################################################################
###############################################################################
def DecodeEmailItem(Text):
try:
if Text!=None:
dc = decode_header(Text)
ItemValue = dc[0][0]
Encoding = dc[0][1]
if Encoding!=None:
Text = ItemValue.decode(Encoding)
else:
pass
else:
pass
except:
pass
return Text
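# Added illustrative sketch (not part of the original parser): RFC 2047
# encoded-word headers are decoded using their declared charset. Call manually.
def _DemoDecodeEmailItem():
    assert DecodeEmailItem('=?utf-8?q?Caf=C3=A9?=') == 'Café'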
###############################################################################
###############################################################################
# Function parsing a single email
###############################################################################
def ParseEmail(EmlFilePath, EmailID=None, HeaderFields = None, Attachments=None, WorkingDir = ''):
###########################################################################
# Set Header Feilds
if HeaderFields == None:
HeaderFields = ['DATE', 'FROM', 'TO', 'CC', 'BCC', 'SUBJECT', 'CONTENT-LANGUAGE', 'MESSAGE-ID',
'IN-REPLY-TO', 'REFERENCES', 'RETURN-PATH', 'X-MS-HAS-ATTACH', 'X-ORIGINATING-IP']
else:
pass
# Set Email ID
if EmailID == None:
try:
EmailID = str(uuid.uuid1())
except:
pass
else:
pass
###########################################################################
# Extract Header
HEADER_ITEM = pd.DataFrame(data=[[EmailID, EmlFilePath]], columns=['EmailID', 'FileName'])
try:
Email = email.message_from_file(open(EmlFilePath))
for item in HeaderFields:
HEADER_ITEM[item] = DecodeEmailItem(Email.get(item))
except:
pass
###########################################################################
# References
MSGID_ITEMS = pd.DataFrame(data=[], columns=['EmailID', 'Designation', 'MESSAGE-ID'])
try:
MID = pd.DataFrame(data=re.findall(MESSAGID_FORMAT, HEADER_ITEM['MESSAGE-ID'].values[0]), columns=['MESSAGE-ID'])
MID['Designation'] = 0
MID['Order'] = (MID.index+1).astype('int')
except:
pass
try:
MID = pd.DataFrame(data=re.findall(MESSAGID_FORMAT, HEADER_ITEM['IN-REPLY-TO'].values[0]), columns=['MESSAGE-ID'])
MID['Designation'] = 1
MID['Order'] = (MID.index+1).astype('int')
except:
pass
try:
MID = pd.DataFrame(data=re.findall(REFERENCES_FORMAT, HEADER_ITEM['REFERENCES'].values[0]), columns=['MESSAGE-ID'])
MID['Designation'] = 2
MID['Order'] = (MID.index+1).astype('int')
except:
pass
MSGID_ITEMS['EmailID'] = EmailID
del(MID)
###########################################################################
# Addresses
ADDRESS_ITEMS = pd.DataFrame(data=[], columns=['EmailID', 'Designation', 'Order', 'Name', 'Address' ])
try:
ADD = pd.DataFrame(data=re.findall(NAME_EMAIL_FORMAT, HEADER_ITEM['FROM'].values[0]),columns=['Name', 'Address'])
ADD['Designation']=0
ADD['Order'] = (ADD.index+1).astype('int')
ADDRESS_ITEMS = ADDRESS_ITEMS.append(ADD, ignore_index=False, sort=False)
except:
pass
try:
ADD = pd.DataFrame(data=re.findall(NAME_EMAIL_FORMAT, HEADER_ITEM['TO'].values[0]),columns=['Name', 'Address'])
ADD['Designation']=1
ADD['Order'] = (ADD.index+1).astype('int')
ADDRESS_ITEMS = ADDRESS_ITEMS.append(ADD, ignore_index=False, sort=False)
except:
pass
try:
ADD = pd.DataFrame(data=re.findall(NAME_EMAIL_FORMAT, HEADER_ITEM['CC'].values[0]),columns=['Name', 'Address'])
ADD['Designation']=2
ADD['Order'] = (ADD.index+1).astype('int')
ADDRESS_ITEMS = ADDRESS_ITEMS.append(ADD, ignore_index=False, sort=False)
except:
pass
try:
ADD = pd.DataFrame(data=re.findall(NAME_EMAIL_FORMAT, HEADER_ITEM['BCC'].values[0]),columns=['Name', 'Address'])
ADD['Designation']=3
ADD['Order'] = (ADD.index+1).astype('int')
ADDRESS_ITEMS = ADDRESS_ITEMS.append(ADD, ignore_index=False, sort=False)
except:
pass
ADDRESS_ITEMS['EmailID'] = EmailID
del(ADD)
###########################################################################
# Content
ContentID = 0
ContentParts = [] #Array of contents
BodyTextContentParts = []
###########################################################################
# Attchments
    AttachmentParts = [] # Array of attachments
###########################################################################
    # Extract Email Content
try:
for part in Email.walk():
ContentMainType = part.get_content_maintype()
ContentSubType = part.get_content_subtype()
            if ContentMainType != 'multipart': # At the lowest level in the content hierarchy
isBodyText=False
ContentID = ContentID + 1
ContentString=''
ContentDisposition = part.get_content_disposition()
ContentFileName = DecodeEmailItem(part.get_filename())
try:
ContentTransferEncoding = part.get('Content-Transfer-Encoding').upper()
except:
ContentTransferEncoding = 'QUOTED-PRINTABLE'
try:
MIMEContentID = re.findall(MESSAGID_FORMAT, part.get('Content-ID'))[0]
except:
MIMEContentID = None
ContentType = '[{}/{}]'.format(ContentMainType, ContentSubType)
if ContentMainType == 'text' and (ContentSubType == 'plain' or ContentSubType == 'html'):
try:
CharSet = part.get_content_charset()
except:
CharSet = 'iso8859_15'
try:
ContentString = part.get_payload(decode=True).decode(encoding=CharSet)
ContentString = RemoveTagsFormats(ContentString) #RemoveHTML(ContentString)
isBodyText = True
except:
pass
#--------------------------------------------------------------
elif ContentMainType == 'message' and ContentSubType == 'rfc822':
try:
ContentString = DecodeEmailItem(part.get('SUBJECT'))
isBodyText = True
except:
pass
#Record MessageID
#Append Subject, Text Body
                    #Separate from next selection tasks
pass
#--------------------------------------------------------------
elif ContentMainType == 'audio' or ContentMainType == 'video':
pass
elif ContentMainType == 'application' and (ContentSubType == 'zip' or ContentSubType == 'x-7z-compressed' or ContentSubType=='x-rar-compressed'):
pass
elif (ContentDisposition == 'attachment' or ContentDisposition == 'inline') or ContentFileName != None:
if Attachments != None:
if ContentFileName == None: #Add condition not to exclude inline and attachemnt
ContentFileName = '<< FileName Blanck >>'
else:
AttachmentFileName = 'E{}A{}_{}'.format(EmailID, ContentID, ContentFileName)
#print(AttachmentFileName)
try:
startTime = timer()
AttachmentFileData = part.get_payload(decode=True)
#--------------------------------------------------------------
if Attachments == 'save':
file = open('{}\\{}'.format(WorkingDir,AttachmentFileName), 'wb')
file.write(AttachmentFileData)
file.close()
#--------------------------------------------------------------
if Attachments == 'extract':
if ContentTransferEncoding=='BASE64':
pass
else:
AttachmentFileData = AttachmentFileData.encode()
FileBytes=io.BytesIO(AttachmentFileData)
ByteLength = len(AttachmentFileData)/1024 #File size in Kilobytes by byte length
if ByteLength<=51200:
AttachmentText = FileObject2String(FileBytes, AttachmentFileName)
else:
AttachmentText = ''
ExecuteTime = timer() - startTime
ContentFileSize=int(FileBytes.seek(0,2)/1024) #File size in Kilobytes
AttachmentParts.append([EmailID, ContentID, ContentFileName, ContentFileSize, ContentType, MIMEContentID, ExecuteTime, AttachmentText])
FileBytes.close()
#--------------------------------------------------------------
except:
pass
else:
pass
else:
pass
ContentParts.append([EmailID, ContentID, ContentType, ContentDisposition, ContentFileName, MIMEContentID])
if isBodyText == True:
BodyTextContentParts.append([EmailID, ContentID, ContentType, ContentString])
else:
pass
#######################################################################
        #Update Header with content Info summary
HEADER_ITEM['ContentCount'] = ContentID # Last content ID (start 1, 0 if none)
HEADER_ITEM['FileReadError'] = 0
        #Create content and attachment DataFrames
CONTENT_ITEMS = pd.DataFrame(data=ContentParts, columns=['EmailID', 'ContentID', 'ContentType', 'ContentDisposition', 'ContentFileName', 'MIMEContentID'])
BODY_TEXT_ITEMS = pd.DataFrame(data=BodyTextContentParts, columns=['EmailID', 'ContentID', 'ContentType', 'ContentString'])
ATTACHMENT_ITEMS = pd.DataFrame(data=AttachmentParts, columns=['EmailID', 'ContentID', 'ContentFileName', 'ContentFileSize', 'ContentType', 'MIMEContentID', 'ExecuteTime', 'AttachmentText'])
#######################################################################
except:
print('ParseEmail Exception Occured: {}'.format(traceback.format_exc()))
HEADER_ITEM= pd.DataFrame(data=[[EmailID, EmlFilePath, 0, 1]], columns=['EmailID', 'FileName', 'ContentCount', 'FileReadError'])
BODY_TEXT_ITEMS = pd.DataFrame()
CONTENT_ITEMS = pd.DataFrame()
ATTACHMENT_ITEMS = | pd.DataFrame() | pandas.DataFrame |
"""
Author: <NAME>
Created: 27/08/2020 11:13 AM
"""
import pandas as pd
import os
import numpy as np
from supporting_functions.conversions import convert_RH_vpa
from supporting_functions.woodward_2020_params import get_woodward_mean_full_params
test_dir = os.path.join(os.path.dirname(__file__), 'test_data')
def establish_peyman_input(return_pet=False):
# use the scott farm so that it doesn't need irrigation
# time period [2010 - 2013)
# load weather data
weather_path = os.path.join(test_dir, 'hamilton_ruakura_ews2010-2013_{}.csv')
pressure = pd.read_csv(os.path.join(test_dir, 'hamilton_AWS_pressure.csv'),
skiprows=8).loc[:, ['year',
'doy',
'pmsl']].set_index(['year', 'doy'])
rain = pd.read_csv(weather_path.format('rain')).loc[:, ['year',
'doy',
'rain']].set_index(['year', 'doy'])
temp = pd.read_csv(weather_path.format('temp')).loc[:, ['year',
'doy',
'tmax', 'tmin']].set_index(['year', 'doy'])
rad = pd.read_csv(weather_path.format('rad')).loc[:, ['year',
'doy',
'radn']].set_index(['year', 'doy'])
wind = pd.read_csv(weather_path.format('wind')).loc[:, ['year',
'doy',
'wind']].set_index(['year', 'doy'])
pet = pd.read_csv(weather_path.format('pet')).loc[:, ['year',
'doy',
'pet']].set_index(['year', 'doy'])
rh = pd.read_csv(weather_path.format('rh')).loc[:, ['year',
'doy',
'rh']]
rh.loc[:, 'rh'] = pd.to_numeric(rh.rh, errors='coerce')
rh = rh.groupby(['year', 'doy']).mean()
dates = pd.Series(pd.date_range('2010-01-01', '2012-12-31'))
matrix_weather = pd.DataFrame({'year': dates.dt.year,
'doy': dates.dt.dayofyear,
'to_delete': 1}).set_index(['year', 'doy'])
matrix_weather = pd.merge(matrix_weather, temp, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, rain, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, rad, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, rh, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, wind, how='outer', left_index=True, right_index=True)
matrix_weather = | pd.merge(matrix_weather, pet, how='outer', left_index=True, right_index=True) | pandas.merge |
"""
The training function used in the finetuning task.
"""
import csv
import logging
import os
import pickle
import time
from argparse import Namespace
from logging import Logger
from typing import List
import numpy as np
import pandas as pd
import torch
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from grover.data import MolCollator
from grover.data import StandardScaler
from grover.util.metrics import get_metric_func
from grover.util.nn_utils import initialize_weights, param_count
from grover.util.scheduler import NoamLR
from grover.util.utils import build_optimizer, build_lr_scheduler, makedirs, load_checkpoint, get_loss_func, \
save_checkpoint, build_model
from grover.util.utils import get_class_sizes, get_data, split_data, get_task_names
from task.predict import predict, evaluate, evaluate_predictions
def train(epoch, model, data, loss_func, optimizer, scheduler,
shared_dict, args: Namespace, n_iter: int = 0,
logger: logging.Logger = None):
"""
Trains a model for an epoch.
    :param epoch: Current epoch number (used only for bookkeeping).
    :param model: Model.
    :param data: A MoleculeDataset, or a DataLoader already wrapping one.
    :param loss_func: Loss function.
    :param optimizer: An Optimizer.
    :param scheduler: A learning rate scheduler.
    :param shared_dict: Shared dictionary passed to the MolCollator.
    :param args: Arguments.
    :param n_iter: The number of iterations (training examples) trained on so far.
    :param logger: A logger for printing intermediate results.
    :return: The total number of iterations trained on so far and the mean training loss.
"""
# debug = logger.debug if logger is not None else print
model.train()
# data.shuffle()
loss_sum, iter_count = 0, 0
cum_loss_sum, cum_iter_count = 0, 0
mol_collator = MolCollator(shared_dict=shared_dict, args=args)
num_workers = 4
if type(data) == DataLoader:
mol_loader = data
else:
mol_loader = DataLoader(data, batch_size=args.batch_size, shuffle=True,
num_workers=num_workers, collate_fn=mol_collator)
for _, item in enumerate(mol_loader):
_, batch, features_batch, mask, targets = item
if next(model.parameters()).is_cuda:
mask, targets = mask.cuda(), targets.cuda()
class_weights = torch.ones(targets.shape)
if args.cuda:
class_weights = class_weights.cuda()
# Run model
model.zero_grad()
preds = model(batch, features_batch)
loss = loss_func(preds, targets) * class_weights * mask
loss = loss.sum() / mask.sum()
loss_sum += loss.item()
iter_count += args.batch_size
cum_loss_sum += loss.item()
cum_iter_count += 1
loss.backward()
optimizer.step()
if isinstance(scheduler, NoamLR):
scheduler.step()
n_iter += args.batch_size
#if (n_iter // args.batch_size) % args.log_frequency == 0:
# lrs = scheduler.get_lr()
# loss_avg = loss_sum / iter_count
# loss_sum, iter_count = 0, 0
# lrs_str = ', '.join(f'lr_{i} = {lr:.4e}' for i, lr in enumerate(lrs))
return n_iter, cum_loss_sum / cum_iter_count
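# Added illustrative sketch (not part of GROVER): how the masked averaging in
# train() behaves. The tensors are invented and MSE stands in for whatever loss
# get_loss_func() returns; entries with mask == 0 contribute nothing to the
# mean. Call manually; it never runs on import.
def _demo_masked_loss():
    preds = torch.tensor([[0.2], [0.9], [0.4]])
    targets = torch.tensor([[0.0], [1.0], [0.0]])
    mask = torch.tensor([[1.0], [1.0], [0.0]])  # third target is unlabelled
    per_element = torch.nn.functional.mse_loss(preds, targets, reduction='none')
    return (per_element * mask).sum() / mask.sum()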
def run_training(args: Namespace, time_start, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# pin GPU to local rank.
idx = args.gpu
if args.gpu is not None:
torch.cuda.set_device(idx)
features_scaler, scaler, shared_dict, test_data, train_data, val_data = load_data(args, debug, logger)
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Train ensemble of models
for model_idx in range(args.ensemble_size):
        # Tensorboard writer (only instantiated when tensorboard logging is requested)
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        writer = SummaryWriter(log_dir=save_dir) if args.tensorboard else None
# Load/build model
if args.checkpoint_paths is not None:
if len(args.checkpoint_paths) == 1:
cur_model = 0
else:
cur_model = model_idx
debug(f'Loading model {cur_model} from {args.checkpoint_paths[cur_model]}')
model = load_checkpoint(args.checkpoint_paths[cur_model], current_args=args, logger=logger)
else:
debug(f'Building model {model_idx}')
model = build_model(model_idx=model_idx, args=args)
if args.fine_tune_coff != 1 and args.checkpoint_paths is not None:
debug("Fine tune fc layer with different lr")
initialize_weights(model_idx=model_idx, model=model.ffn, distinct_init=args.distinct_init)
# Get loss and metric functions
loss_func = get_loss_func(args, model)
optimizer = build_optimizer(model, args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.cuda()
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
# Learning rate schedulers
scheduler = build_lr_scheduler(optimizer, args)
# Bulid data_loader
shuffle = True
mol_collator = MolCollator(shared_dict={}, args=args)
train_data = DataLoader(train_data,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=10,
collate_fn=mol_collator)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
min_val_loss = float('inf')
for epoch in range(args.epochs):
s_time = time.time()
n_iter, train_loss = train(
epoch=epoch,
model=model,
data=train_data,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
shared_dict=shared_dict,
logger=logger
)
t_time = time.time() - s_time
s_time = time.time()
val_scores, val_loss = evaluate(
model=model,
data=val_data,
loss_func=loss_func,
num_tasks=args.num_tasks,
metric_func=metric_func,
batch_size=args.batch_size,
dataset_type=args.dataset_type,
scaler=scaler,
shared_dict=shared_dict,
logger=logger,
args=args
)
v_time = time.time() - s_time
# Average validation score
avg_val_score = np.nanmean(val_scores)
# Logged after lr step
if isinstance(scheduler, ExponentialLR):
scheduler.step()
if args.show_individual_scores:
# Individual validation scores
for task_name, val_score in zip(args.task_names, val_scores):
debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
print('Epoch: {:04d}'.format(epoch),
'loss_train: {:.6f}'.format(train_loss),
'loss_val: {:.6f}'.format(val_loss),
f'{args.metric}_val: {avg_val_score:.4f}',
# 'auc_val: {:.4f}'.format(avg_val_score),
'cur_lr: {:.5f}'.format(scheduler.get_lr()[-1]),
't_time: {:.4f}s'.format(t_time),
'v_time: {:.4f}s'.format(v_time))
if args.tensorboard:
writer.add_scalar('loss/train', train_loss, epoch)
writer.add_scalar('loss/val', val_loss, epoch)
writer.add_scalar(f'{args.metric}_val', avg_val_score, epoch)
# Save model checkpoint if improved validation score
if args.select_by_loss:
if val_loss < min_val_loss:
min_val_loss, best_epoch = val_loss, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
else:
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
if epoch - best_epoch > args.early_stop_epoch:
break
ensemble_scores = 0.0
# Evaluate on test set using model with best validation score
if args.select_by_loss:
info(f'Model {model_idx} best val loss = {min_val_loss:.6f} on epoch {best_epoch}')
else:
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
test_preds, _ = predict(
model=model,
data=test_data,
loss_func=loss_func,
batch_size=args.batch_size,
logger=logger,
shared_dict=shared_dict,
scaler=scaler,
args=args
)
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds, dtype=float)
# Average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
if args.show_individual_scores:
# Individual test scores
for task_name, test_score in zip(args.task_names, test_scores):
info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
# Evaluate ensemble on test set
avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
ensemble_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
ind = [['preds'] * args.num_tasks + ['targets'] * args.num_tasks, args.task_names * 2]
ind = pd.MultiIndex.from_tuples(list(zip(*ind)))
data = np.concatenate([np.array(avg_test_preds), np.array(test_targets)], 1)
test_result = | pd.DataFrame(data, index=test_smiles, columns=ind) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import demjson
import logging
import pandas as pd
import requests
from zvt.api.common import generate_kdata_id
from zvt.recorders.consts import EASTMONEY_ETF_NET_VALUE_HEADER
from zvt.api.technical import get_kdata
from zvt.domain import Index, Provider, SecurityType, StoreCategory, TradingLevel, Index1DKdata
from zvt.recorders.recorder import ApiWrapper, FixedCycleDataRecorder, TimeSeriesFetchingStyle
from zvt.utils.time_utils import to_time_str
from zvt.utils.utils import init_process_log
logger = logging.getLogger(__name__)
class MyApiWrapper(ApiWrapper):
def request(self, url=None, method='post', param=None, path_fields=None):
security_item = param['security_item']
size = param['size']
url = url.format(security_item.exchange, security_item.code, size)
response = requests.get(url)
response_json = demjson.decode(response.text)
if response_json is None or len(response_json) == 0:
return []
df = pd.DataFrame(response_json)
df.rename(columns={'day': 'timestamp'}, inplace=True)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['name'] = security_item.name
df['provider'] = Provider.SINA.value
df['level'] = param['level']
return df.to_dict(orient='records')
class ChinaETFDayKdataRecorder(FixedCycleDataRecorder):
meta_provider = Provider.EXCHANGE
meta_schema = Index
provider = Provider.SINA
store_category = StoreCategory.index_1d_kdata
data_schema = Index1DKdata
url = 'http://money.finance.sina.com.cn/quotes_service/api/json_v2.php/CN_MarketData.getKLineData?' \
'symbol={}{}&scale=240&&datalen={}&ma=no'
api_wrapper = MyApiWrapper()
def __init__(self, security_type=SecurityType.index, exchanges=['sh', 'sz'], codes=None, batch_size=10,
force_update=False, sleeping_time=5, fetching_style=TimeSeriesFetchingStyle.end_size,
default_size=2000, contain_unfinished_data=False, level=TradingLevel.LEVEL_1DAY,
one_shot=True) -> None:
super().__init__(security_type, exchanges, codes, batch_size, force_update, sleeping_time, fetching_style,
default_size, contain_unfinished_data, level, one_shot)
def get_data_map(self):
return {}
def generate_domain_id(self, security_item, original_data):
return generate_kdata_id(security_id=security_item.id, timestamp=original_data['timestamp'], level=self.level)
def generate_request_param(self, security_item, start, end, size, timestamp):
        # This url does not support paging; if we request more than the allowed number of records, only the maximum number can be fetched
if start is None or size > self.default_size:
size = 8000
return {
'security_item': security_item,
'level': self.level.value,
'size': size
}
def on_finish(self, security_item):
kdatas = get_kdata(security_id=security_item.id, level=TradingLevel.LEVEL_1DAY.value,
order=Index1DKdata.timestamp.asc(),
return_type='domain', session=self.session,
filters=[Index1DKdata.cumulative_net_value.is_(None)])
if kdatas and len(kdatas) > 0:
start = kdatas[0].timestamp
end = kdatas[-1].timestamp
            # Fetch the fund's cumulative net value from Eastmoney
df = self.fetch_cumulative_net_value(security_item, start, end)
if df is not None and not df.empty:
for kdata in kdatas:
if kdata.timestamp in df.index:
kdata.cumulative_net_value = df.loc[kdata.timestamp, 'LJJZ']
kdata.change_pct = df.loc[kdata.timestamp, 'JZZZL']
self.session.commit()
                self.logger.info(f'{security_item.code} - {security_item.name} cumulative net value update finished...')
def fetch_cumulative_net_value(self, security_item, start, end) -> pd.DataFrame:
query_url = 'http://api.fund.eastmoney.com/f10/lsjz?' \
'fundCode={}&pageIndex={}&pageSize=200&startDate={}&endDate={}'
page = 1
df = pd.DataFrame()
while True:
url = query_url.format(security_item.code, page, to_time_str(start), to_time_str(end))
response = requests.get(url, headers=EASTMONEY_ETF_NET_VALUE_HEADER)
response_json = demjson.decode(response.text)
response_df = pd.DataFrame(response_json['Data']['LSJZList'])
            # Last page
if response_df.empty:
break
response_df['FSRQ'] = pd.to_datetime(response_df['FSRQ'])
response_df['JZZZL'] = pd.to_numeric(response_df['JZZZL'], errors='coerce')
response_df['LJJZ'] = pd.to_numeric(response_df['LJJZ'], errors='coerce')
response_df = response_df.fillna(0)
response_df.set_index('FSRQ', inplace=True, drop=True)
df = | pd.concat([df, response_df]) | pandas.concat |
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import multiprocessing as mp
import pandas.io.data as web
import matplotlib.pyplot as plt
def compTrade(dt):
d=0.001
dt['reg']=np.where(dt['dmacd']>d,1,0)
dt['reg']=np.where(dt['dmacd']<-d,-1,dt['reg'])
dt['strategy']=dt['reg'].shift(1)*dt['market']
return dt
def getSymbols(h5):
l=[]
for c in h5.keys():
l.append(h5[c]['Adj Close'].columns.values)
return [item for items in l for item in items]
def getClose(h5,sym):
df = | pd.DataFrame() | pandas.DataFrame |
"""
"""
__author__ = ""
__version__ = ""
import os
import json
from uuid import UUID
from flask import g
import pandas as pd
# Function to save the recieved JSON file to disk
def jsonDump(name, struct, dir = os.getcwd() + "\\"):
print('JSON dump')
# Open a file for writing, filename will always be unique so append functions uneccessary
with open(dir + name, 'w') as f:
# Save the JSON to a JSON file on disk
json.dump(struct, f)
# Function to save the processed data to a CSV
def csvDump(fileName, struct, index_set = False, index_label_usr = False, dir = os.getcwd() + "\\"):
if os.path.exists(dir + fileName + '.csv'):
print('CSV Append')
with open(dir + fileName + '.csv', 'a', encoding="utf-8", newline="") as fd:
struct.to_csv(fd, header=False, index=index_set)
else:
print('CSV Create')
struct.to_csv(dir + fileName + '.csv', header=True, index=index_set, index_label = index_label_usr)
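# Hypothetical usage (not part of the original source; the file name, DataFrame
# and column label below are placeholders): the first call creates
# "readings.csv" with a header row, later calls append rows without a header.
#   csvDump("readings", processed_df, index_set=True, index_label_usr="timestamp")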
# Convert returned strings from the DB into GUID
def strToUUID(struct):
# Remove the leading and trailing characters from the ID
struct = struct.replace("[('", "")
struct = struct.replace("', )]", "")
# Convert trimmed string into a GUID (UUID)
g.strUUID = UUID(struct)
# Return to calling function
return g.strUUID
# Function for splitting dataframes with concatonated values into multiple rows
# Solution provided by user: zouweilin
# Solution link: https://gist.github.com/jlln/338b4b0b55bd6984f883
# Modified to use a delimeter regex pattern, so rows can be split using different delimeters
# TODO: #2 Fix 'pop from empty stack' error while parsing through sensors without a split
import re
def split_dataframe_rows(df,column_selectors, delimiters):
# we need to keep track of the ordering of the columns
print('Splitting rows...')
regexPattern = "|".join(map(re.escape,delimiters))
def _split_list_to_rows(row,row_accumulator,column_selector,regexPattern):
split_rows = {}
max_split = 0
for column_selector in column_selectors:
split_row = re.split(regexPattern,row[column_selector])
split_rows[column_selector] = split_row
if len(split_row) > max_split:
max_split = len(split_row)
for i in range(max_split):
new_row = row.to_dict()
for column_selector in column_selectors:
try:
new_row[column_selector] = split_rows[column_selector].pop(0)
except IndexError:
new_row[column_selector] = ''
row_accumulator.append(new_row)
new_rows = []
df.apply(_split_list_to_rows,axis=1,args = (new_rows,column_selectors,regexPattern))
new_df = pd.DataFrame(new_rows, columns=df.columns)
return new_df
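# Illustrative sketch (not from the original source; the column names and
# delimiters are made up): splitting two concatenated columns on either "|"
# or ";" so each reading becomes its own row.
def _split_rows_example():
    demo = pd.DataFrame({"sensorName": ["AQ-1"],
                         "rawData": ["12|7;3"],
                         "dataValue": ["1.2|0.7;0.3"]})
    return split_dataframe_rows(demo, ["rawData", "dataValue"], ["|", ";"])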
# Monnit data contains trailing values on sensors with more than one measurand,
# and needs to be processed out before properly splitting.
# This function should check the name of the sensors for a list of known sensor
# types and remove the trailing values.
def rmTrailingValues(df, sensors):
print ('Removing trailing values from sensors')
    # Pre-compile the regex statement for the used sensors using the list of sensors provided via parameter
p = re.compile('|'.join(map(re.escape, sensors)), flags=re.IGNORECASE)
# Locate any entries that begin with the sensor names provided in the list
# using the prepared regex and remove 4 characters from the raw data variable
#df.loc[[bool(p.match(x)) for x in df['sensorName']], ['rawData']] = df['rawData'].str[:-4]
df.loc[[bool(p.match(x)) for x in df['sensorName']], ['rawData']] = df.loc[[bool(p.match(x)) for x in df['sensorName']], 'rawData'].astype('str').str[:-4]
return df
# Multiple networks can be configured on the Monnit system.
# This function will filter out unwanted networks by keeping
# networks with the IDs that are passed to the function.
def filterNetwork(df, networkID):
print('Filtering out unwanted network')
df = df[df.networkID == networkID]
return df
def aqProcessing(df):
print("Processing AQ sensor data")
    # Add an additional '0' to dataValue and rawData columns to preserve variable ordering when the variable is split
df.loc[(df.plotLabels == '?g/m^3|PM1|PM2.5|PM10'), 'dataValue'] = "0|" + df.loc[(df.plotLabels == '?g/m^3|PM1|PM2.5|PM10'), 'dataValue']
df.loc[(df.plotLabels == '?g/m^3|PM1|PM2.5|PM10'), 'rawData'] = "0%7c" + df.loc[(df.plotLabels == '?g/m^3|PM1|PM2.5|PM10'), 'rawData']
# Add another occurance of 'Micrograms' to the dataType column to prevent Null entries upon splitting the dataframe.
df.loc[(df.dataType == 'Micrograms|Micrograms|Micrograms'), 'dataType'] = 'Micrograms|Micrograms|Micrograms|Micrograms'
# Create a dataframe from Air Quality entries
includedColumns = df.loc[df['plotLabels']=='?g/m^3|PM1|PM2.5|PM10']
for i, x in includedColumns.iterrows():
        # Split the data so it can be re-ordered
rawDataList = x.rawData.split('%7c')
# Re-order the processed data into the proper order (PM1, 2.5, 10) and insert the original split delimiter
includedColumns.loc[i, 'rawData'] = str(rawDataList[0]) + '%7c' + str(rawDataList[3]) + '%7c' + str(rawDataList[1]) + '%7c' + str(rawDataList[2])
    # Overwrite the air quality data with the modified data that re-orders the variables
df = includedColumns.combine_first(df)
return df
# Split the data by sensor ID and export the data to separate CSV
# files and an XLSX file with separate worksheets per sensor
def sortSensors(df, col):
print('Sorting and cleaning sensors')
# Sort the values in the dataframe by their sensor ID
df.sort_values(by = [col], inplace = True)
# Set the DF index to the sensor IDs
df.set_index(keys = [col], drop = False, inplace = True)
# Remove existing index names
df.index.name = None
return df
# Pivot passed in DF to make analysis easier.
# 'values', 'index', and 'columns', are all lists of variables
def pivotTable(df, values, index, columns, aggFunc):
print('Pivoting data...')
#df = pd.pivot_table(df, values=['rawData', 'dataValue', 'plotValues'], index=['sensorID'], columns=['dataType', 'plotLabels'], aggfunc=np.sum)
df = | pd.pivot_table(df, values=values, index=index, columns=columns, aggfunc=aggFunc) | pandas.pivot_table |
import pandas as pd
import numpy as np
import re
## Different than what was expected, creating a unique for for every DF column
## performed a slower execution than having different fors for each DF column
def cleanInvalidDFEntries(id_key,stamp,actor_column,verb_column,object_column):
day = [] # Check which day of the week the timestamp represent
day_shift = [] # Check which shift of the day the time stamp represent: night, morning, afternoon
actor = [] # List of respective actor's name
lang = [] # List of respective language of the verb
action = [] # List of verbs
object_aim = [] # List of afected object
for i in range(len(id_key)):
# Test which part of the day is and add it to the list
day.append(stamp[i].day_name()) # Check which day of the week is
if stamp[i].hour < 6 or stamp[i].hour >= 20:
day_shift.append('night')
elif stamp[i].hour >= 6 and stamp[i].hour < 12:
day_shift.append('morning')
else:
day_shift.append('afternoon')
# Grab the raw actor text in the CSV file as str, convert it to dict and collect the actor's name
if not (actor_column[i] is np.nan): # Check if actor's name exists, if not, add a NaN which will be purged later
if 'name' in actor_column[i].keys(): # Dict format: {'name':'person'}. Check if the key 'name' exists
if actor_column[i]['name'] != "":
actor.append(actor_column[i]['name']) # This line appends the actor of the current iteration
else:
actor.append(np.nan)
else:
actor.append(np.nan)
else:
actor.append(np.nan)
# Grab the raw verb text in the CSV file as str, convert it to dict and collect the verb and the language
if not (verb_column[i] is np.nan):
            if 'id' in verb_column[i].keys(): # dict format: {'id': '.../verb', 'display': {'language': 'verb'}}. Check if the key 'id' exists
if verb_column[i]['id'] != "":
action.append(re.split("/",verb_column[i]['id'])[-1]) # collects the verb in the current iteration and append it, NaN otherwise
else:
action.append(np.nan)
else:
action.append(np.nan)
if 'display' in verb_column[i].keys(): # dict format: {'display': {'language':'verb'}}. Check if 'display' exists
if verb_column[i]['display'][list(verb_column[i]['display'].keys())[0]] != "":
lang.append(list(verb_column[i]['display'].keys())[0]) # this line appends the language value to the lang list, NaN otherwise
else:
lang.append(np.nan)
else:
lang.append(np.nan)
else:
action.append(np.nan)
lang.append(np.nan)
# Grab the raw verb text in the CSV file as str, convert it to dict and collect the object
if not (object_column[i] is np.nan): # dict format: {'definition':{'name':{'es-US':'object'}}}
if 'definition' in object_column[i].keys(): # check if the key 'definition' exists. Appends NaN otherwise
if 'name' in object_column[i]['definition'].keys(): # check if the key 'name' exists. Appends NaN otherwise
if object_column[i]['definition'][list(object_column[i]['definition'].keys())[0]] \
[list(object_column[i]['definition'][list(object_column[i]['definition'].keys())[0]].keys())[0]] != "":
object_aim.append(object_column[i]['definition'] # This line appends the object of the current iteration
[list(object_column[i]['definition'].keys())[0]]
[list(object_column[i]['definition'][list(object_column[i]['definition'].keys())[0]].keys())[0]])
else:
object_aim.append(np.nan)
else:
object_aim.append(np.nan)
else:
object_aim.append(np.nan)
else:
object_aim.append(np.nan)
d = | pd.DataFrame(data={'id':id_key,'timestamp':stamp,'weekday':day,'dayshift':day_shift,'actor':actor,'verb':action,'object':object_aim,'language':lang}) | pandas.DataFrame |
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.topology import Molecule, Topology
from biopandas.pdb import PandasPdb
import matplotlib.pyplot as plt
from operator import itemgetter
from mendeleev import element
from simtk.openmm import app
from scipy import optimize
import subprocess as sp
from sys import stdout
import pandas as pd
import numpy as np
import statistics
import itertools
import parmed
import pickle
import shutil
import simtk
import scipy
import time
import math
import sys
import ast
import re
import os
BOHRS_PER_ANGSTROM = 0.529
HARTREE_PER_KCAL_MOL = 627.509391
#kcal/mol * A^2 to kJ/mol * nm^2
KCAL_MOL_PER_KJ_MOL = 4.184
ANGSTROMS_PER_NM = 10.0
RADIANS_PER_DEGREE = np.pi / 180.0
method_basis_scale_dict = {
"HF STO-3G": 0.817,
"HF 3-21G": 0.906,
"HF 3-21G*": 0.903,
"HF 6-31G": 0.903,
"HF 6-31G*": 0.899,
"HF 6-31G**": 0.903,
"HF 6-31+G**": 0.904,
"HF 6-311G*": 0.904,
"HF 6-311G**": 0.909,
"HF TZVP": 0.909,
"HF cc-pVDZ": 0.908,
"HF cc-pVTZ": 0.91,
"HF cc-pVQZ": 0.908,
"HF aug-cc-pVDZ": 0.911,
"HF aug-cc-pVTZ": 0.91,
"HF aug-cc-pVQZ": 0.909,
"HF daug-cc-pVDZ": 0.912,
"HF daug-cc-pVTZ": 0.905,
"ROHF 3-21G": 0.907,
"ROHF 3-21G*": 0.909,
"ROHF 6-31G": 0.895,
"ROHF 6-31G*": 0.89,
"ROHF 6-31G**": 0.855,
"ROHF 6-31+G**": 0.856,
"ROHF 6-311G*": 0.856,
"ROHF 6-311G**": 0.913,
"ROHF cc-pVDZ": 0.861,
"ROHF cc-pVTZ": 0.901,
"LSDA STO-3G": 0.896,
"LSDA 3-21G": 0.984,
"LSDA 3-21G*": 0.982,
"LSDA 6-31G": 0.98,
"LSDA 6-31G*": 0.981,
"LSDA 6-31G**": 0.981,
"LSDA 6-31+G**": 0.985,
"LSDA 6-311G*": 0.984,
"LSDA 6-311G**": 0.988,
"LSDA TZVP": 0.988,
"LSDA cc-pVDZ": 0.989,
"LSDA cc-pVTZ": 0.989,
"LSDA aug-cc-pVDZ": 0.989,
"LSDA aug-cc-pVTZ": 0.991,
"BLYP STO-3G": 0.925,
"BLYP 3-21G": 0.995,
"BLYP 3-21G*": 0.994,
"BLYP 6-31G": 0.992,
"BLYP 6-31G*": 0.992,
"BLYP 6-31G**": 0.992,
"BLYP 6-31+G**": 0.995,
"BLYP 6-311G*": 0.998,
"BLYP 6-311G**": 0.996,
"BLYP TZVP": 0.998,
"BLYP cc-pVDZ": 1.002,
"BLYP cc-pVTZ": 0.997,
"BLYP aug-cc-pVDZ": 0.998,
"BLYP aug-cc-pVTZ": 0.997,
"B1B95 STO-3G": 0.883,
"B1B95 3-21G": 0.957,
"B1B95 3-21G*": 0.955,
"B1B95 6-31G": 0.954,
"B1B95 6-31G*": 0.949,
"B1B95 6-31G**": 0.955,
"B1B95 6-31+G**": 0.957,
"B1B95 6-311G*": 0.959,
"B1B95 6-311G**": 0.96,
"B1B95 TZVP": 0.957,
"B1B95 cc-pVDZ": 0.961,
"B1B95 cc-pVTZ": 0.957,
"B1B95 aug-cc-pVDZ": 0.958,
"B1B95 aug-cc-pVTZ": 0.959,
"B3LYP STO-3G": 0.892,
"B3LYP 3-21G": 0.965,
"B3LYP 3-21G*": 0.962,
"B3LYP 6-31G": 0.962,
"B3LYP 6-31G*": 0.96,
"B3LYP 6-31G**": 0.961,
"B3LYP 6-31+G**": 0.964,
"B3LYP 6-311G*": 0.966,
"B3LYP 6-311G**": 0.967,
"B3LYP TZVP": 0.965,
"B3LYP cc-pVDZ": 0.97,
"B3LYP cc-pVTZ": 0.967,
"B3LYP cc-pVQZ": 0.969,
"B3LYP aug-cc-pVDZ": 0.97,
"B3LYP aug-cc-pVTZ": 0.968,
"B3LYP aug-cc-pVQZ": 0.969,
"B3PW91 STO-3G": 0.885,
"B3PW91 3-21G": 0.961,
"B3PW91 3-21G*": 0.959,
"B3PW91 6-31G": 0.958,
"B3PW91 6-31G*": 0.957,
"B3PW91 6-31G**": 0.958,
"B3PW91 6-31+G**": 0.96,
"B3PW91 6-311G*": 0.963,
"B3PW91 6-311G**": 0.963,
"B3PW91 TZVP": 0.964,
"B3PW91 cc-pVDZ": 0.965,
"B3PW91 cc-pVTZ": 0.962,
"B3PW91 aug-cc-pVDZ": 0.965,
"B3PW91 aug-cc-pVTZ": 0.965,
"mPW1PW91 STO-3G": 0.879,
"mPW1PW91 3-21G": 0.955,
"mPW1PW91 3-21G*": 0.95,
"mPW1PW91 6-31G": 0.947,
"mPW1PW91 6-31G*": 0.948,
"mPW1PW91 6-31G**": 0.952,
"mPW1PW91 6-31+G**": 0.952,
"mPW1PW91 6-311G*": 0.954,
"mPW1PW91 6-311G**": 0.957,
"mPW1PW91 TZVP": 0.954,
"mPW1PW91 cc-pVDZ": 0.958,
"mPW1PW91 cc-pVTZ": 0.959,
"mPW1PW91 aug-cc-pVDZ": 0.958,
"mPW1PW91 aug-cc-pVTZ": 0.958,
"PBEPBE STO-3G": 0.914,
"PBEPBE 3-21G": 0.991,
"PBEPBE 3-21G*": 0.954,
"PBEPBE 6-31G": 0.986,
"PBEPBE 6-31G*": 0.986,
"PBEPBE 6-31G**": 0.986,
"PBEPBE 6-31+G**": 0.989,
"PBEPBE 6-311G*": 0.99,
"PBEPBE 6-311G**": 0.991,
"PBEPBE TZVP": 0.989,
"PBEPBE cc-pVDZ": 0.994,
"PBEPBE cc-pVTZ": 0.993,
"PBEPBE aug-cc-pVDZ": 0.994,
"PBEPBE aug-cc-pVTZ": 0.994,
"PBE1PBE STO-3G": 0.882,
"PBE1PBE 3-21G": 0.96,
"PBE1PBE 3-21G*": 0.96,
"PBE1PBE 6-31G": 0.956,
"PBE1PBE 6-31G*": 0.95,
"PBE1PBE 6-31G**": 0.953,
"PBE1PBE 6-31+G**": 0.955,
"PBE1PBE 6-311G*": 0.959,
"PBE1PBE 6-311G**": 0.959,
"PBE1PBE TZVP": 0.96,
"PBE1PBE cc-pVDZ": 0.962,
"PBE1PBE cc-pVTZ": 0.961,
"PBE1PBE aug-cc-pVDZ": 0.962,
"PBE1PBE aug-cc-pVTZ": 0.962,
"HSEh1PBE STO-3G": 0.883,
"HSEh1PBE 3-21G": 0.963,
"HSEh1PBE 3-21G*": 0.96,
"HSEh1PBE 6-31G": 0.957,
"HSEh1PBE 6-31G*": 0.951,
"HSEh1PBE 6-31G**": 0.954,
"HSEh1PBE 6-31+G**": 0.955,
"HSEh1PBE 6-311G*": 0.96,
"HSEh1PBE 6-311G**": 0.96,
"HSEh1PBE TZVP": 0.96,
"HSEh1PBE cc-pVDZ": 0.962,
"HSEh1PBE cc-pVTZ": 0.961,
"HSEh1PBE aug-cc-pVDZ": 0.962,
"HSEh1PBE aug-cc-pVTZ": 0.962,
"TPSSh 3-21G": 0.969,
"TPSSh 3-21G*": 0.966,
"TPSSh 6-31G": 0.962,
"TPSSh 6-31G*": 0.959,
"TPSSh 6-31G**": 0.959,
"TPSSh 6-31+G**": 0.963,
"TPSSh 6-311G*": 0.963,
"TPSSh TZVP": 0.964,
"TPSSh cc-pVDZ": 0.972,
"TPSSh cc-pVTZ": 0.968,
"TPSSh aug-cc-pVDZ": 0.967,
"TPSSh aug-cc-pVTZ": 0.965,
"B97D3 3-21G": 0.983,
"B97D3 6-31G*": 0.98,
"B97D3 6-31+G**": 0.983,
"B97D3 6-311G**": 0.986,
"B97D3 TZVP": 0.986,
"B97D3 cc-pVDZ": 0.992,
"B97D3 cc-pVTZ": 0.986,
"B97D3 aug-cc-pVTZ": 0.985,
"MP2 STO-3G": 0.872,
"MP2 3-21G": 0.955,
"MP2 3-21G*": 0.951,
"MP2 6-31G": 0.957,
"MP2 6-31G*": 0.943,
"MP2 6-31G**": 0.937,
"MP2 6-31+G**": 0.941,
"MP2 6-311G*": 0.95,
"MP2 6-311G**": 0.95,
"MP2 TZVP": 0.948,
"MP2 cc-pVDZ": 0.953,
"MP2 cc-pVTZ": 0.95,
"MP2 cc-pVQZ": 0.948,
"MP2 aug-cc-pVDZ": 0.959,
"MP2 aug-cc-pVTZ": 0.953,
"MP2 aug-cc-pVQZ": 0.95,
"MP2=FULL STO-3G": 0.889,
"MP2=FULL 3-21G": 0.955,
"MP2=FULL 3-21G*": 0.948,
"MP2=FULL 6-31G": 0.95,
"MP2=FULL 6-31G*": 0.942,
"MP2=FULL 6-31G**": 0.934,
"MP2=FULL 6-31+G**": 0.939,
"MP2=FULL 6-311G*": 0.947,
"MP2=FULL 6-311G**": 0.949,
"MP2=FULL TZVP": 0.953,
"MP2=FULL cc-pVDZ": 0.95,
"MP2=FULL cc-pVTZ": 0.949,
"MP2=FULL cc-pVQZ": 0.957,
"MP2=FULL aug-cc-pVDZ": 0.969,
"MP2=FULL aug-cc-pVTZ": 0.951,
"MP2=FULL aug-cc-pVQZ": 0.956,
"MP3 STO-3G": 0.894,
"MP3 3-21G": 0.968,
"MP3 3-21G*": 0.965,
"MP3 6-31G": 0.966,
"MP3 6-31G*": 0.939,
"MP3 6-31G**": 0.935,
"MP3 6-31+G**": 0.931,
"MP3 TZVP": 0.935,
"MP3 cc-pVDZ": 0.948,
"MP3 cc-pVTZ": 0.945,
"MP3=FULL 6-31G*": 0.938,
"MP3=FULL 6-31+G**": 0.932,
"MP3=FULL TZVP": 0.934,
"MP3=FULL cc-pVDZ": 0.94,
"MP3=FULL cc-pVTZ": 0.933,
"B2PLYP 6-31G*": 0.949,
"B2PLYP 6-31+G**": 0.952,
"B2PLYP TZVP": 0.954,
"B2PLYP cc-pVDZ": 0.958,
"B2PLYP cc-pVTZ": 0.959,
"B2PLYP cc-pVQZ": 0.957,
"B2PLYP aug-cc-pVTZ": 0.961,
"B2PLYP=FULL 3-21G": 0.952,
"B2PLYP=FULL 6-31G*": 0.948,
"B2PLYP=FULL 6-31+G**": 0.951,
"B2PLYP=FULL TZVP": 0.954,
"B2PLYP=FULL cc-pVDZ": 0.959,
"B2PLYP=FULL cc-pVTZ": 0.956,
"B2PLYP=FULL aug-cc-pVDZ": 0.962,
"B2PLYP=FULL aug-cc-pVTZ": 0.959,
"CID 3-21G": 0.932,
"CID 3-21G*": 0.931,
"CID 6-31G": 0.935,
"CID 6-31G*": 0.924,
"CID 6-31G**": 0.924,
"CID 6-31+G**": 0.924,
"CID 6-311G*": 0.929,
"CID cc-pVDZ": 0.924,
"CID cc-pVTZ": 0.927,
"CISD 3-21G": 0.941,
"CISD 3-21G*": 0.934,
"CISD 6-31G": 0.938,
"CISD 6-31G*": 0.926,
"CISD 6-31G**": 0.918,
"CISD 6-31+G**": 0.922,
"CISD 6-311G*": 0.925,
"CISD cc-pVDZ": 0.922,
"CISD cc-pVTZ": 0.93,
"QCISD 3-21G": 0.969,
"QCISD 3-21G*": 0.961,
"QCISD 6-31G": 0.964,
"QCISD 6-31G*": 0.952,
"QCISD 6-31G**": 0.941,
"QCISD 6-31+G**": 0.945,
"QCISD 6-311G*": 0.957,
"QCISD 6-311G**": 0.954,
"QCISD TZVP": 0.955,
"QCISD cc-pVDZ": 0.959,
"QCISD cc-pVTZ": 0.956,
"QCISD aug-cc-pVDZ": 0.969,
"QCISD aug-cc-pVTZ": 0.962,
"CCD 3-21G": 0.972,
"CCD 3-21G*": 0.957,
"CCD 6-31G": 0.96,
"CCD 6-31G*": 0.947,
"CCD 6-31G**": 0.938,
"CCD 6-31+G**": 0.942,
"CCD 6-311G*": 0.955,
"CCD 6-311G**": 0.955,
"CCD TZVP": 0.948,
"CCD cc-pVDZ": 0.957,
"CCD cc-pVTZ": 0.934,
"CCD aug-cc-pVDZ": 0.965,
"CCD aug-cc-pVTZ": 0.957,
"CCSD 3-21G": 0.943,
"CCSD 3-21G*": 0.943,
"CCSD 6-31G": 0.943,
"CCSD 6-31G*": 0.944,
"CCSD 6-31G**": 0.933,
"CCSD 6-31+G**": 0.934,
"CCSD 6-311G*": 0.954,
"CCSD TZVP": 0.954,
"CCSD cc-pVDZ": 0.947,
"CCSD cc-pVTZ": 0.941,
"CCSD cc-pVQZ": 0.951,
"CCSD aug-cc-pVDZ": 0.963,
"CCSD aug-cc-pVTZ": 0.956,
"CCSD aug-cc-pVQZ": 0.953,
"CCSD=FULL 6-31G*": 0.95,
"CCSD=FULL TZVP": 0.948,
"CCSD=FULL cc-pVTZ": 0.948,
"CCSD=FULL aug-cc-pVTZ": 0.951,
}
element_list = [
["1 ", "H ", "Hydrogen"],
["2 ", "He", "Helium"],
["3 ", "Li", "Lithium"],
["4 ", "Be", "Beryllium"],
["5 ", "B ", "Boron"],
["6 ", "C ", "Carbon"],
["7 ", "N ", "Nitrogen"],
["8 ", "O ", "Oxygen"],
["9 ", "F ", "Fluorine"],
["10", "Ne", "Neon"],
["11", "Na", "Sodium"],
["12", "Mg", "Magnesium"],
["13", "Al", "Aluminum"],
["14", "Si", "Silicon"],
["15", "P ", "Phosphorus"],
["16", "S ", "Sulfur"],
["17", "Cl", "Chlorine"],
["18", "Ar", "Argon"],
["19", "K ", "Potassium"],
["20", "Ca", "Calcium"],
["21", "Sc", "Scandium"],
["22", "Ti", "Titanium"],
["23", "V ", "Vanadium"],
["24", "Cr", "Chromium"],
["25", "Mn", "Manganese"],
["26", "Fe", "Iron"],
["27", "Co", "Cobalt"],
["28", "Ni", "Nickel"],
["29", "Cu", "Copper"],
["30", "Zn", "Zinc"],
["31", "Ga", "Gallium"],
["32", "Ge", "Germanium"],
["33", "As", "Arsenic"],
["34", "Se", "Selenium"],
["35", "Br", "Bromine"],
["36", "Kr", "Krypton"],
["37", "Rb", "Rubidium"],
["38", "Sr", "Strontium"],
["39", "Y ", "Yttrium"],
["40", "Zr", "Zirconium"],
["41", "Nb", "Niobium"],
["42", "Mo", "Molybdenum"],
["43", "Tc", "Technetium"],
["44", "Ru", "Ruthenium"],
["45", "Rh", "Rhodium"],
["46", "Pd", "Palladium"],
["47", "Ag", "Silver"],
["48", "Cd", "Cadmium"],
["49", "In", "Indium"],
["50", "Sn", "Tin"],
["51", "Sb", "Antimony"],
["52", "Te", "Tellurium"],
["53", "I ", "Iodine"],
["54", "Xe", "Xenon"],
["55", "Cs", "Cesium"],
["56", "Ba", "Barium"],
["57", "La", "Lanthanum"],
["58", "Ce", "Cerium"],
["59", "Pr", "Praseodymium"],
["60", "Nd", "Neodymium"],
["61", "Pm", "Promethium"],
["62", "Sm", "Samarium"],
["63", "Eu", "Europium"],
["64", "Gd", "Gadolinium"],
["65", "Tb", "Terbium"],
["66", "Dy", "Dysprosium"],
["67", "Ho", "Holmium"],
["68", "Er", "Erbium"],
["69", "Tm", "Thulium"],
["70", "Yb", "Ytterbium"],
["71", "Lu", "Lutetium"],
["72", "Hf", "Hafnium"],
["73", "Ta", "Tantalum"],
["74", "W ", "Tungsten"],
["75", "Re", "Rhenium"],
["76", "Os", "Osmium"],
["77", "Ir", "Iridium"],
["78", "Pt", "Platinum"],
["79", "Au", "Gold"],
["80", "Hg", "Mercury"],
["81", "Tl", "Thallium"],
["82", "Pb", "Lead"],
["83", "Bi", "Bismuth"],
["84", "Po", "Polonium"],
["85", "At", "Astatine"],
["86", "Rn", "Radon"],
["87", "Fr", "Francium"],
["88", "Ra", "Radium"],
["89", "Ac", "Actinium"],
["90", "Th", "Thorium"],
["91", "Pa", "Protactinium"],
["92", "U ", "Uranium"],
["93", "Np", "Neptunium"],
["94", "Pu", "Plutonium"],
["95", "Am", "Americium"],
["96", "Cm", "Curium"],
["97", "Bk", "Berkelium"],
["98", "Cf", "Californium"],
["99", "Es", "Einsteinium"],
]
def get_vibrational_scaling(functional, basis_set):
"""
Returns vibrational scaling factor given the functional
and the basis set for the QM engine.
Parameters
----------
functional: str
Functional
basis_set: str
Basis set
Returns
-------
vib_scale: float
        Vibrational scaling factor corresponding to the given
        basis_set and functional.
Examples
--------
>>> get_vibrational_scaling("QCISD", "6-311G*")
0.957
"""
vib_scale = method_basis_scale_dict.get(functional + " " + basis_set)
return vib_scale
def unit_vector_N(u_BC, u_AB):
"""
Calculates unit normal vector perpendicular to plane ABC.
Parameters
----------
u_BC : (.. , 1, 3) array
Unit vector from atom B to atom C.
u_AB : (..., 1, 3) array
Unit vector from atom A to atom B.
Returns
-------
u_N : (..., 1, 3) array
Unit normal vector perpendicular to plane ABC.
Examples
--------
>>> u_BC = [0.34040355, 0.62192853, 0.27011169]
>>> u_AB = [0.28276792, 0.34232697, 0.02370306]
>>> unit_vector_N(u_BC, u_AB)
array([-0.65161629, 0.5726879 , -0.49741811])
"""
cross_product = np.cross(u_BC, u_AB)
norm_u_N = np.linalg.norm(cross_product)
u_N = cross_product / norm_u_N
return u_N
def delete_guest_angle_params(guest_qm_params_file="guest_qm_params.txt"):
"""
"""
f_params = open(guest_qm_params_file, "r")
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
lines_selected = lines_params[:to_begin] + lines_params[to_end + 1 :]
with open(guest_qm_params_file, "w") as f_:
f_.write("".join(lines_selected))
return
def remove_bad_angle_params(
guest_qm_params_file="guest_qm_params.txt", angle=1.00, k_angle=500):
with open(guest_qm_params_file, "r") as f_params:
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
lines_to_omit = []
for i in angle_params:
if float(re.findall(r"[-+]?\d+[.]?\d*", i)[0]) < float(angle) or float(
re.findall(r"[-+]?\d+[.]?\d*", i)[1]
) > float(k_angle):
lines_to_omit.append(i)
for b in lines_to_omit:
lines_params.remove(b)
with open(guest_qm_params_file, "w") as file:
for j in lines_params:
file.write(j)
def get_num_host_atoms(host_pdb):
"""
Reads the host PDB file and returns the
total number of atoms.
"""
ppdb = PandasPdb()
ppdb.read_pdb(host_pdb)
no_host_atoms = ppdb.df["ATOM"].shape[0]
return no_host_atoms
def change_names(inpcrd_file, prmtop_file, pdb_file):
command = "cp -r " + inpcrd_file + " system_qmmmrebind.inpcrd"
os.system(command)
command = "cp -r " + prmtop_file + " system_qmmmrebind.prmtop"
os.system(command)
command = "cp -r " + pdb_file + " system_qmmmrebind.pdb"
os.system(command)
def copy_file(source, destination):
"""
Copies a file from a source to the destination.
"""
shutil.copy(source, destination)
def get_openmm_energies(system_pdb, system_xml):
"""
    Prints the decomposed OpenMM energies of the
    system.
Parameters
----------
system_pdb : str
Input PDB file
system_xml : str
Forcefield file in XML format
"""
pdb = simtk.openmm.app.PDBFile(system_pdb)
ff_xml_file = open(system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(
getEnergy=True, getParameters=True, getForces=True
)
force_group = []
for i, force in enumerate(system.getForces()):
force_group.append(force.__class__.__name__)
forcegroups = {}
for i in range(system.getNumForces()):
force = system.getForce(i)
force.setForceGroup(i)
forcegroups[force] = i
energies = {}
for f, i in forcegroups.items():
energies[f] = (
simulation.context.getState(getEnergy=True, groups=2 ** i)
.getPotentialEnergy()
._value
)
decomposed_energy = []
for key, val in energies.items():
decomposed_energy.append(val)
df_energy_openmm = pd.DataFrame(
list(zip(force_group, decomposed_energy)),
columns=["Energy_term", "Energy_openmm_params"],
)
energy_values = [
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicBondForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicAngleForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "PeriodicTorsionForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "NonbondedForce"
].values[0]
)[1],
]
energy_group = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_open_mm = pd.DataFrame(
list(zip(energy_group, energy_values)),
columns=["Energy_term", "Energy_openmm_params"],
)
df_energy_open_mm = df_energy_open_mm.set_index("Energy_term")
print(df_energy_open_mm)
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array which contains the coordinates of all
the N atoms.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def force_angle_constant(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14 of
Seminario calculation paper; returns angle (in kcal/mol/rad^2)
and equilibrium angle (in degrees).
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y and Z
coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
k_0 : float
Equilibrium angle between AB and BC.
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
# Normal vector to angle plane found
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_u_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_u_PA
u_PC = np.cross(u_CB, u_N)
norm_u_PC = np.linalg.norm(u_PC)
u_PC = u_PC / norm_u_PC
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Scaling due to additional angles - Modified Seminario Part
sum_first = sum_first / scaling_1
sum_second = sum_second / scaling_2
# Added as two springs in series
k_theta = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta = 1 / k_theta
k_theta = -k_theta # Change to OPLS form
k_theta = abs(k_theta * 0.5) # Change to OPLS form
# Equilibrium Angle
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
    # If the vectors u_CB and u_AB are linearly dependent, u_N cannot be defined.
    # This case is dealt with here:
if abs(sum((u_CB) - (u_AB))) < 0.01 or (
abs(sum((u_CB) - (u_AB))) > 1.99 and abs(sum((u_CB) - (u_AB))) < 2.01
):
scaling_1 = 1
scaling_2 = 1
[k_theta, theta_0] = force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
)
return k_theta, theta_0
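# Illustrative sketch (not part of the original source; all numbers are made
# up): the "two springs in series" combination used above, shown in isolation.
def _series_spring_sketch(bond_length_AB=1.09, bond_length_BC=1.53,
                          sum_first=0.35, sum_second=0.42):
    k_theta = 1 / ((1 / ((bond_length_AB ** 2) * sum_first))
                   + (1 / ((bond_length_BC ** 2) * sum_second)))
    return abs(-k_theta * 0.5)  # same OPLS-form sign and scale step as above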
def dot_product(u_PA, eig_AB):
"""
Returns the dot product of two vectors.
Parameters
----------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the
plane of A, B, C.
eig_AB : (..., 3, 3) array
Eigenvectors of the hessian matrix for
the bond AB.
"""
x = 0
for i in range(0, 3):
x = x + u_PA[i] * eig_AB[i].conjugate()
return x
def force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14
of Seminario calculation paper when the vectors
u_CB and u_AB are linearly dependent and u_N cannot
be defined. It instead takes samples of u_N across a
unit sphere for the calculation; returns angle
(in kcal/mol/rad^2) and equilibrium angle in degrees.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y,
and Z coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
k_0 : float
Equilibrium angle between AB and BC.
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
k_theta_array = np.zeros((180, 360))
# Find force constant with varying u_N (with vector uniformly
# sampled across a sphere)
for theta in range(0, 180):
for phi in range(0, 360):
r = 1
u_N = [
r
* math.sin(math.radians(theta))
                * math.cos(math.radians(phi)),
r
* math.sin(math.radians(theta))
                * math.sin(math.radians(phi)),
r * math.cos(math.radians(theta)),
]
u_PA = np.cross(u_N, u_AB)
u_PA = u_PA / np.linalg.norm(u_PA)
u_PC = np.cross(u_CB, u_N)
u_PC = u_PC / np.linalg.norm(u_PC)
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Added as two springs in series
k_theta_ij = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta_ij = 1 / k_theta_ij
k_theta_ij = -k_theta_ij # Change to OPLS form
k_theta_ij = abs(k_theta_ij * 0.5) # Change to OPLS form
k_theta_array[theta, phi] = k_theta_ij
# Removes cases where u_N was linearly dependent of u_CB or u_AB.
# Force constant used is taken as the mean.
k_theta = np.mean(np.mean(k_theta_array))
# Equilibrium Angle independent of u_N
    theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
return k_theta, theta_0
def force_constant_bond(atom_A, atom_B, eigenvalues, eigenvectors, coords):
"""
Calculates the bond force constant for the bonds in the
molecule according to equation 10 of seminario paper,
given the bond atoms' indices and the corresponding
eigenvalues, eigenvectors and coordinates matrices.
Parameters
----------
atom_A : int
Index of Atom A.
atom_B : int
Index of Atom B.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing eigenvalues
of the hessian matrix, where N is the total number
of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing the
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y, and
Z coordinates of all N atoms.
Returns
--------
k_AB : float
Bond Force Constant value for the bond with atoms A and B.
"""
# Eigenvalues and eigenvectors calculated
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[:, :, atom_A, atom_B]
# Vector along bond
diff_AB = np.array(coords[atom_B, :]) - np.array(coords[atom_A, :])
norm_diff_AB = np.linalg.norm(diff_AB)
unit_vectors_AB = diff_AB / norm_diff_AB
k_AB = 0
# Projections of eigenvalues
for i in range(0, 3):
dot_product = abs(np.dot(unit_vectors_AB, eigenvectors_AB[:, i]))
k_AB = k_AB + (eigenvalues_AB[i] * dot_product)
k_AB = -k_AB * 0.5 # Convert to OPLS form
return k_AB
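# Minimal synthetic sketch (not part of the original source): a fake two-atom
# eigen-system with the bond along x, showing how Eq. 10 is evaluated. The
# eigenvalues are made up and carry no physical meaning.
def _force_constant_bond_sketch():
    coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    eigenvalues = np.zeros((2, 2, 3))
    eigenvalues[0, 1, :] = [-0.6, -0.1, -0.1]
    eigenvectors = np.zeros((3, 3, 2, 2))
    eigenvectors[:, :, 0, 1] = np.eye(3)
    return force_constant_bond(0, 1, eigenvalues, eigenvectors, coords)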
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array containing the coordinates of all the N atoms.
Returns
-------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the plane of A, B, C.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def reverse_list(lst):
"""
Returns the reversed form of a given list.
Parameters
----------
lst : list
Input list.
Returns
-------
reversed_list : list
Reversed input list.
Examples
--------
>>> lst = [5, 4, 7, 2]
>>> reverse_list(lst)
[2, 7, 4, 5]
"""
reversed_list = lst[::-1]
return reversed_list
def uniq(input_):
"""
Returns a list with only unique elements from a list
containing duplicate / repeating elements.
Parameters
----------
input_ : list
Input list.
Returns
-------
output : list
List with only unique elements.
Examples
--------
>>> lst = [2, 4, 2, 9, 10, 35, 10]
>>> uniq(lst)
[2, 4, 9, 10, 35]
"""
output = []
for x in input_:
if x not in output:
output.append(x)
return output
def search_in_file(file: str, word: str) -> list:
"""
Search for the given string in file and return lines
containing that string along with line numbers.
Parameters
----------
file : str
Input file.
word : str
Search word.
Returns
-------
list_of_results : list
List of lists with each element representing the
line number and the line contents.
"""
line_number = 0
list_of_results = []
with open(file, "r") as f:
for line in f:
line_number += 1
if word in line:
list_of_results.append((line_number, line.rstrip()))
return list_of_results
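# Hypothetical usage (file name and contents are placeholders, not from the
# original source):
#   search_in_file("mol.out", "Frequencies")
#   -> [(312, 'Frequencies --   42.10   87.63  101.34'), ...]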
def list_to_dict(lst):
"""
Converts an input list with mapped characters (every
odd entry is the key of the dictionary and every
    even entry adjacent to the odd entry is its corresponding
value) to a dictionary.
Parameters
----------
lst : list
Input list.
Returns
-------
res_dct : dict
A dictionary with every element mapped with
its successive element starting from index 0.
Examples
--------
>>> lst = [5, 9, 3, 6, 2, 7]
>>> list_to_dict(lst)
{5: 9, 3: 6, 2: 7}
"""
res_dct = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
return res_dct
def scale_list(list_):
"""
Returns a scaled list with the minimum value
subtracted from each element of the corresponding list.
Parameters
----------
list_ : list
Input list.
Returns
-------
scaled_list : list
Scaled list.
Examples
--------
>>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
>>> scale_list(list_)
[4, 1, 3, 9, 1, 0, 6, 4]
"""
scaled_list = [i - min(list_) for i in list_]
return scaled_list
def list_kJ_kcal(list_):
"""
Convert the elements in the list from
kiloJoules units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of kJ.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_kJ_kcal(list_)
[1.4340344168260037, 0.7170172084130019, 1.1950286806883366]
"""
converted_list = [i / 4.184 for i in list_]
return converted_list
def list_hartree_kcal(list_):
"""
Convert the elements in the list from
hartree units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of hartree.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_hartree_kcal(list_)
[3765.0564000000004, 1882.5282000000002, 3137.547]
"""
converted_list = [i * 627.5094 for i in list_]
return converted_list
def torsiondrive_input_to_xyz(psi_input_file, xyz_file):
"""
Returns an xyz file from a torsiondrive formatted
input file.
Parameters
----------
psi_input_file : str
Input file for the psi4 QM engine.
xyz_file : str
XYZ format file to write the coords of the system.
"""
with open(psi_input_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "molecule {" in lines[i]:
to_begin = int(i)
if "set {" in lines[i]:
to_end = int(i)
xyz_lines = lines[to_begin + 2 : to_end - 1]
with open(xyz_file, "w") as f:
f.write(str(len(xyz_lines)) + "\n")
f.write(xyz_file + "\n")
for i in xyz_lines:
f.write(i)
def xyz_to_pdb(xyz_file, coords_file, template_pdb, system_pdb):
"""
Converts a XYZ file to a PDB file.
Parameters
----------
xyz_file : str
XYZ file containing the coordinates of the system.
coords_file : str
A text file containing the coordinates part of XYZ file.
template_pdb : str
A pdb file to be used as a template for the required PDB.
system_pdb : str
Output PDB file with the coordinates updated in the
template pdb using XYZ file.
"""
with open(xyz_file, "r") as f:
lines = f.readlines()
needed_lines = lines[2:]
with open(coords_file, "w") as f:
for i in needed_lines:
f.write(i)
df = pd.read_csv(coords_file, header=None, delimiter=r"\s+")
df.columns = ["atom", "x", "y", "z"]
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = df["x"]
ppdb.df["ATOM"]["y_coord"] = df["y"]
ppdb.df["ATOM"]["z_coord"] = df["z"]
ppdb.to_pdb(system_pdb)
def generate_xml_from_pdb_sdf(system_pdb, system_sdf, system_xml):
"""
Generates an openforcefield xml file from the pdb file.
Parameters
----------
system_pdb : str
Input PDB file.
system_sdf : str
SDF file of the system.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_sdf
os.system(command)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
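# Hypothetical usage sketch (file names are placeholders, not from the original
# source); assumes obabel is on PATH and the openff toolkit is installed:
#   generate_xml_from_pdb_sdf("guest_init.pdb", "guest_init.sdf", "guest_init.xml")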
def generate_xml_from_charged_pdb_sdf(
system_pdb,
system_init_sdf,
system_sdf,
num_charge_atoms,
index_charge_atom_1,
charge_atom_1,
system_xml,
):
"""
Generates an openforcefield xml file from the pdb
file via SDF file and openforcefield.
Parameters
----------
system_pdb : str
Input PDB file.
system_init_sdf : str
SDF file for the system excluding charge information.
system_sdf : str
SDF file of the system.
num_charge_atoms : int
Total number of charged atoms in the PDB.
index_charge_atom_1 : int
Index of the first charged atom.
charge_atom_1 : float
Charge on first charged atom.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_init_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_init_sdf
os.system(command)
with open(system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(num_charge_atoms)
+ " "
+ str(index_charge_atom_1)
+ " "
+ str(charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def get_dihedrals(qm_scan_file):
"""
Returns dihedrals from the torsiondrive scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
dihedrals : list
List of all the dihedral values from the qm scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
return dihedrals
def get_qm_energies(qm_scan_file):
"""
Returns QM optimized energies from the torsiondrive
scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
qm_energies : list
        List of all the QM optimized energies extracted from the torsiondrive
scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
qm_energies = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
energy = float(energy_dihedral[1])
qm_energies.append(energy)
return qm_energies
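# Note on the assumed scan-file layout (inferred from the regex above, not
# stated in the original source): each frame header line is expected to look
# roughly like "Dihedral -60.0 Energy -310.123456", so that
# re.findall(r"[-+]?\d+[.]?\d*", line) yields [dihedral, energy] in that order.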
def generate_mm_pdbs(qm_scan_file, template_pdb):
"""
Generate PDBs from the torsiondrive scan file
based on a template PDB.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
lines_markers = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
lines_markers.append(i)
lines_markers.append(len(lines) + 1)
for i in range(len(lines_markers) - 1):
# pdb_file_to_write = str(dihedrals[i]) + ".pdb"
if dihedrals[i] > 0:
pdb_file_to_write = "plus_" + str(abs(dihedrals[i])) + ".pdb"
if dihedrals[i] < 0:
pdb_file_to_write = "minus_" + str(abs(dihedrals[i])) + ".pdb"
to_begin = lines_markers[i]
to_end = lines_markers[i + 1]
lines_to_write = lines[to_begin + 1 : to_end - 1]
x_coords = []
y_coords = []
z_coords = []
for i in lines_to_write:
coordinates = i
coordinates = re.findall(r"[-+]?\d+[.]?\d*", coordinates)
x = float(coordinates[0])
y = float(coordinates[1])
z = float(coordinates[2])
x_coords.append(x)
y_coords.append(y)
z_coords.append(z)
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = x_coords
ppdb.df["ATOM"]["y_coord"] = y_coords
ppdb.df["ATOM"]["z_coord"] = z_coords
ppdb.to_pdb(pdb_file_to_write)
def remove_mm_files(qm_scan_file):
"""
Delete all generated PDB files.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
for i in mm_pdb_list:
command = "rm -rf " + i
os.system(command)
command = "rm -rf " + i[:-4] + ".inpcrd"
os.system(command)
command = "rm -rf " + i[:-4] + ".prmtop"
os.system(command)
def get_non_torsion_mm_energy(system_pdb, load_topology, system_xml):
"""
Returns sum of all the non-torsional energies (that
includes HarmonicBondForce, HarmonicAngleForce
and NonBondedForce) of the system from the PDB
file given the topology and the forcefield file.
Parameters
----------
system_pdb : str
System PDB file to load the openmm system topology
and coordinates.
load_topology : {"openmm", "parmed"}
Argument to specify how to load the topology.
system_xml : str
XML force field file for the openmm system.
Returns
-------
Sum of all the non-torsional energies of the system.
"""
system_prmtop = system_pdb[:-4] + ".prmtop"
system_inpcrd = system_pdb[:-4] + ".inpcrd"
if load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(system_pdb, structure=True).topology,
parmed.load_file(system_xml),
)
if load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(system_pdb).topology,
parmed.load_file(system_xml),
)
openmm_system.save(system_prmtop, overwrite=True)
openmm_system.coordinates = parmed.load_file(
system_pdb, structure=True
).coordinates
openmm_system.save(system_inpcrd, overwrite=True)
parm = parmed.load_file(system_prmtop, system_inpcrd)
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
# print(prmtop_energy_decomposition)
prmtop_energy_decomposition_value_no_torsion = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
return sum(prmtop_energy_decomposition_value_no_torsion)
def get_mm_potential_energies(qm_scan_file, load_topology, system_xml):
"""
Returns potential energy of the system from the PDB file
given the topology and the forcefield file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML file to load the openmm system.
Returns
-------
mm_potential_energies : list
List of all the non torsion mm energies for the
generated PDB files.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
    mm_potential_energies = []
    for i in mm_pdb_list:
mm_energy = get_non_torsion_mm_energy(
system_pdb=i, load_topology=load_topology, system_xml=system_xml,
)
mm_potential_energies.append(mm_energy)
return mm_potential_energies
def list_diff(list_1, list_2):
"""
Returns the difference between two lists as a list.
Parameters
----------
list_1 : list
First list
list_2 : list
Second list.
Returns
-------
diff_list : list
        List containing the differences between the elements of
the two lists.
Examples
--------
>>> list_1 = [4, 2, 8, 3, 0, 6, 7]
>>> list_2 = [5, 3, 1, 5, 6, 0, 4]
>>> list_diff(list_1, list_2)
[-1, -1, 7, -2, -6, 6, 3]
"""
diff_list = []
zipped_list = zip(list_1, list_2)
for list1_i, list2_i in zipped_list:
diff_list.append(list1_i - list2_i)
return diff_list
def dihedral_energy(x, k1, k2, k3, k4=0):
"""
    Expression for the dihedral energy (x in degrees):
    E(x) = k1*(1 + cos(x)) + k2*(1 - cos(2*x)) + k3*(1 + cos(3*x)) + k4*(1 - cos(4*x)),
    where the factor 0.01745 approximates pi/180 for the degree-to-radian conversion.
"""
energy_1 = k1 * (1 + np.cos(1 * x * 0.01745))
energy_2 = k2 * (1 - np.cos(2 * x * 0.01745))
energy_3 = k3 * (1 + np.cos(3 * x * 0.01745))
energy_4 = k4 * (1 - np.cos(4 * x * 0.01745))
dihedral_energy = energy_1 + energy_2 + energy_3 + energy_4
return dihedral_energy
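# Illustrative sketch (not part of the original module): evaluating the
# four-term cosine series over a full torsion scan. The force constants
# k1-k3 below are arbitrary example values, not fitted parameters.
def _example_dihedral_energy():
    angles = np.arange(-180, 181, 15)  # scan angles in degrees
    profile = dihedral_energy(angles, k1=1.2, k2=0.4, k3=0.8)
    return angles, profile  # periodic energy profile over the scan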
def error_function(delta_qm, delta_mm):
"""
Root Mean Squared Error.
"""
squared_error = np.square(np.subtract(delta_qm, delta_mm))
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def error_function_boltzmann(delta_qm, delta_mm, T):
"""
Boltzmann Root Mean Squared Error.
"""
kb = 3.297623483 * 10 ** (-24) # in cal/K
delta_qm_boltzmann_weighted = [np.exp(-i / (kb * T)) for i in delta_qm]
squared_error = (
np.square(np.subtract(delta_qm, delta_mm))
* delta_qm_boltzmann_weighted
)
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
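# Illustrative sketch (not part of the original module): comparing the plain
# RMSE with its Boltzmann-weighted variant on small made-up energy lists
# (the numbers below are invented for demonstration only).
def _example_error_functions(temperature=300.0):
    delta_qm = [0.0, 1.5, 3.2, 0.8]
    delta_mm = [0.1, 1.2, 3.9, 0.5]
    rmse = error_function(delta_qm, delta_mm)
    boltzmann_rmse = error_function_boltzmann(delta_qm, delta_mm, temperature)
    return rmse, boltzmann_rmse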
def gen_init_guess(qm_scan_file, load_topology, system_xml):
"""
Initial guess for the torsional parameter.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML force field file for the system.
Returns
-------
k_init_guess : list
Initial guess for the torsional parameters.
"""
x = get_dihedrals(qm_scan_file)
y = scale_list(
list_=get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
)
init_vals = [0.0, 0.0, 0.0, 0.0]
k_init_guess, covar = scipy.optimize.curve_fit(
dihedral_energy, x, y, p0=init_vals
)
for i in range(len(k_init_guess)):
if k_init_guess[i] < 0:
k_init_guess[i] = 0
return k_init_guess
def objective_function(k_array, x, delta_qm):
"""
Objective function for the torsional parameter fitting.
"""
delta_mm = dihedral_energy(
x, k1=k_array[0], k2=k_array[1], k3=k_array[2], k4=k_array[3]
)
loss_function = error_function(delta_qm, delta_mm)
return loss_function
def fit_params(qm_scan_file, load_topology, system_xml, method):
"""
Optimization of the objective function.
"""
k_guess = gen_init_guess(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
x_data = np.array(get_dihedrals(qm_scan_file))
delta_qm = np.array(
scale_list(list_hartree_kcal(list_=get_qm_energies(qm_scan_file)))
)
optimise = scipy.optimize.minimize(
objective_function,
k_guess,
args=(x_data, delta_qm),
method=method,
bounds=[(0.00, None), (0.00, None), (0.00, None), (0.00, None),],
)
return optimise.x
def get_tor_params(
qm_scan_file, template_pdb, load_topology, system_xml, method
):
"""
Returns the fitted torsional parameters.
"""
qm_e = get_qm_energies(qm_scan_file=qm_scan_file)
qm_e_kcal = list_hartree_kcal(qm_e)
delta_qm = scale_list(qm_e_kcal)
generate_mm_pdbs(qm_scan_file=qm_scan_file, template_pdb=template_pdb)
mm_pe_no_torsion_kcal = get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
delta_mm = scale_list(mm_pe_no_torsion_kcal)
opt_param = fit_params(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
return opt_param
def get_torsional_lines(
template_pdb,
system_xml,
qm_scan_file,
load_topology,
method,
dihedral_text_file,
):
"""
Returns the torsional lines for the XML forcefield file.
"""
opt_param = get_tor_params(
qm_scan_file=qm_scan_file,
template_pdb=template_pdb,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
dihedral_text = open(dihedral_text_file, "r")
dihedral_text_lines = dihedral_text.readlines()
atom_numbers = dihedral_text_lines[-1]
atom_index_from_1 = [
int(re.findall(r"\d+", atom_numbers)[0]),
int(re.findall(r"\d+", atom_numbers)[1]),
int(re.findall(r"\d+", atom_numbers)[2]),
int(re.findall(r"\d+", atom_numbers)[3]),
]
atom_index = [i - 1 for i in atom_index_from_1]
atom_index_lines = (
" "
+ "p1="
+ '"'
+ str(atom_index[0])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(atom_index[1])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(atom_index[2])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(atom_index[3])
+ '"'
+ " "
)
tor_lines = []
for i in range(len(opt_param)):
line_to_append = (
" "
+ "<Torsion "
+ "k="
+ '"'
+ str(round(opt_param[i], 8))
+ '"'
+ atom_index_lines
+ "periodicity="
+ '"'
+ str(i + 1)
+ '"'
+ " "
+ "phase="
+ '"'
+ "0"
+ '"'
+ "/>"
)
# print(line_to_append)
tor_lines.append(line_to_append)
return tor_lines
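# Illustrative sketch (not part of the original module): the strings returned
# by get_torsional_lines() are meant to be spliced into the torsion block of
# the system XML force field. With hypothetical fitted constants and atom
# indices they look roughly like
#
#     <Torsion k="1.07342918" p1="2" p2="5" p3="7" p4="9" periodicity="1" phase="0"/>
#     <Torsion k="0.21876013" p1="2" p2="5" p3="7" p4="9" periodicity="2" phase="0"/>
#
# where p1-p4 are zero-based atom indices read from the dihedral text file and
# the k values come from fit_params().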
def singular_resid(pdbfile, qmmmrebind_init_file):
"""
Returns a PDB file with chain ID = A
Parameters
----------
pdbfile: str
Input PDB file
qmmmrebind_init_file: str
Output PDB file
"""
ppdb = PandasPdb().read_pdb(pdbfile)
ppdb.df["HETATM"]["chain_id"] = "A"
ppdb.df["ATOM"]["chain_id"] = "A"
ppdb.to_pdb(
path=qmmmrebind_init_file, records=None, gz=False, append_newline=True
)
def relax_init_structure(
pdbfile,
prmtopfile,
qmmmrebindpdb,
sim_output="output.pdb",
sim_steps=100000,
):
"""
    Minimizes the energy of the initial structure in the PDB file with
    the given topology file.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile : str
Input prmtop file.
    qmmmrebindpdb: str
        Output PDB file.
sim_output: str
Simulation output trajectory file.
sim_steps: int
MD simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedMethod=simtk.openmm.app.PME,
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=10000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(qmmmrebindpdb, sim_steps)
)
simulation.step(sim_steps)
command = "rm -rf " + sim_output
os.system(command)
def truncate(x):
"""
    Returns the input number formatted to a fixed overall width so that
    box-vector columns stay aligned when written to Amber files.
    Parameters
    ----------
    x: float
        Input value.
"""
if len(str(int(float(x)))) == 1:
x = format(x, ".8f")
if len(str(int(float(x)))) == 2:
x = format(x, ".7f")
if len(str(int(float(x)))) == 3:
x = format(x, ".6f")
if len(str(int(float(x)))) == 4:
x = format(x, ".5f")
if len(str(x)) > 10:
x = round(x, 10)
return x
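# Illustrative sketch (not part of the original module): truncate() formats a
# number so the value occupies ten characters, keeping box-vector columns
# aligned in the Amber files written below. For example:
#
#     truncate(9.5)    -> "9.50000000"
#     truncate(40.0)   -> "40.0000000"
#     truncate(418.75) -> "418.750000"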
def add_vectors_inpcrd(pdbfile, inpcrdfile):
"""
Adds periodic box dimensions to the inpcrd file
Parameters
----------
pdbfile: str
PDB file containing the periodic box information.
inpcrdfile: str
Input coordinate file.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
line_to_add = (
" "
+ truncate(vector_list[0])
+ " "
+ truncate(vector_list[1])
+ " "
+ truncate(vector_list[2])
+ " "
+ truncate(vector_list[3])
+ " "
+ truncate(vector_list[4])
+ " "
+ truncate(vector_list[5])
)
print(line_to_add)
with open(inpcrdfile, "a+") as f:
f.write(line_to_add)
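# Illustrative sketch (not part of the original module): for a hypothetical
# CRYST1 record such as
#     CRYST1   40.000   40.000   40.000  90.00  90.00  90.00 P 1
# the regular expression above matches the trailing "1" of "CRYST1" first, so
# the [1:7] slice keeps only the six box values, and the line appended to the
# inpcrd file becomes (roughly)
#      40.0000000 40.0000000 40.0000000 90.0000000 90.0000000 90.0000000
# i.e. three box lengths followed by three box angles in fixed-width columns.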
def add_dim_prmtop(pdbfile, prmtopfile):
"""
Adds periodic box dimensions flag in the prmtop file.
Parameters
----------
prmtopfile: str
Input prmtop file.
pdbfile: str
PDB file containing the periodic box information.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
vector_list = [i / 10 for i in vector_list]
vector_list = [truncate(i) for i in vector_list]
vector_list = [i + "E+01" for i in vector_list]
line3 = (
" "
+ vector_list[3]
+ " "
+ vector_list[0]
+ " "
+ vector_list[1]
+ " "
+ vector_list[2]
)
print(line3)
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
with open(prmtopfile) as f1, open("intermediate.prmtop", "w") as f2:
for line in f1:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f2.write(line)
command = "rm -rf " + prmtopfile
os.system(command)
command = "mv intermediate.prmtop " + prmtopfile
os.system(command)
def add_period_prmtop(parm_file, ifbox):
"""
Changes the value of IFBOX if needed for the prmtop / parm file.
Set to 1 if standard periodic box and 2 when truncated octahedral.
"""
with open(parm_file) as f:
parm_lines = f.readlines()
lines_contain = []
for i in range(len(parm_lines)):
if parm_lines[i].startswith("%FLAG POINTERS"):
lines_contain.append(i + 4)
line = parm_lines[lines_contain[0]]
line_new = "%8s %6s %6s %6s %6s %6s %6s %6s %6s %6s" % (
re.findall(r"\d+", line)[0],
re.findall(r"\d+", line)[1],
re.findall(r"\d+", line)[2],
re.findall(r"\d+", line)[3],
re.findall(r"\d+", line)[4],
re.findall(r"\d+", line)[5],
re.findall(r"\d+", line)[6],
str(ifbox),
re.findall(r"\d+", line)[8],
re.findall(r"\d+", line)[9],
)
parm_lines[lines_contain[0]] = line_new + "\n"
with open(parm_file, "w") as f:
for i in parm_lines:
f.write(i)
def add_solvent_pointers_prmtop(non_reparams_file, reparams_file):
"""
Adds the flag solvent pointers to the topology file.
"""
f_non_params = open(non_reparams_file, "r")
lines_non_params = f_non_params.readlines()
for i in range(len(lines_non_params)):
if "FLAG SOLVENT_POINTERS" in lines_non_params[i]:
to_begin = int(i)
solvent_pointers = lines_non_params[to_begin : to_begin + 3]
file = open(reparams_file, "a")
for i in solvent_pointers:
file.write(i)
def prmtop_calibration(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
):
"""
Standardizes the topology files
Parameters
----------
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
"""
parm = parmed.load_file(prmtopfile, inpcrdfile)
parm_1 = parmed.tools.actions.changeRadii(parm, "mbondi3")
parm_1.execute()
parm_2 = parmed.tools.actions.setMolecules(parm)
parm_2.execute()
parm.save(prmtopfile, overwrite=True)
def run_openmm_prmtop_inpcrd(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with inpcrd and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
inpcrd = simtk.openmm.app.AmberInpcrdFile(inpcrdfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
if inpcrd.boxVectors is None:
add_vectors_inpcrd(
pdbfile=pdbfile, inpcrdfile=inpcrdfile,
)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
print(inpcrd.boxVectors)
simulation.context.setPositions(inpcrd.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def run_openmm_prmtop_pdb(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with pdb and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def move_qmmmmrebind_files(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
pdbfile="system_qmmmrebind.pdb",
):
"""
Moves QMMMReBind generated topology and parameter files
    to a new directory.
Parameters
----------
prmtopfile: str
QMMMReBind generated prmtop file.
inpcrdfile: str
QMMMReBind generated inpcrd file.
pdbfile: str
QMMMReBind generated PDB file.
"""
current_pwd = os.getcwd()
command = "rm -rf reparameterized_files"
os.system(command)
command = "mkdir reparameterized_files"
os.system(command)
shutil.copy(
current_pwd + "/" + prmtopfile,
current_pwd + "/" + "reparameterized_files" + "/" + prmtopfile,
)
shutil.copy(
current_pwd + "/" + inpcrdfile,
current_pwd + "/" + "reparameterized_files" + "/" + inpcrdfile,
)
shutil.copy(
current_pwd + "/" + pdbfile,
current_pwd + "/" + "reparameterized_files" + "/" + pdbfile,
)
def move_qm_files():
"""
    Moves QM engine generated files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qm_data"
os.system(command)
command = "mkdir qm_data"
os.system(command)
command = "cp -r " + "*.com* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.log* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.chk* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.fchk* " + current_pwd + "/" + "qm_data"
os.system(command)
def move_qmmmrebind_files():
"""
Moves all QMMMREBind files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qmmmrebind_data"
os.system(command)
command = "mkdir qmmmrebind_data"
os.system(command)
command = "mv " + "*.sdf* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.txt* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.pdb* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xml* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.chk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.fchk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.com* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.log* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.inpcrd* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.prmtop* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.parm7* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.out* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*run_command* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.dat* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xyz* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
class PrepareQMMM:
"""
A class used to segregate the QM and MM regions.
    This class contains methods to remove the solvent, ions and all
    entities other than the receptor and the ligand. It also
defines the Quantum Mechanical (QM) region and the Molecular
Mechanical (MM) region based upon the distance of the ligand
from the receptor and the chosen number of receptor residues. It
is also assumed that the initial PDB file will have the receptor
followed by the ligand.
...
Attributes
----------
init_pdb : str
Initial PDB file containing the receptor-ligand complex with
solvent, ions, etc.
cleaned_pdb : str
Formatted PDB file containing only the receptor and the ligand.
guest_init_pdb : str
A separate ligand PDB file with atom numbers not beginning from 1.
host_pdb : str
A separate receptor PDB file with atom numbers beginning from 1.
guest_resname : str
Three letter residue ID for the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
guest_xyz : str, optional
A text file of the XYZ coordinates of the ligand.
distance : float, optional
The distance required to define the QM region of the receptor.
This is the distance between the atoms of the ligand and the
atoms of the receptor.
residue_list : str, optional
A text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
host_qm_atoms : str, optional
A text file of the atom numbers of the receptors in the QM
region.
host_mm_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region (all atoms except atoms in the QM region)
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
host_mm_pdb : str, optional
PDB file for the receptor's MM region.
qm_pdb : str, optional
PDB file for the QM region (receptor's QM region and the
ligand).
mm_pdb : str, optional
PDB file for the MM region.
host_mm_region_I_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
        region preceding the QM region.
host_mm_region_II_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region following the QM region.
host_mm_region_I_pdb : str, optional
        PDB file of the receptor in the MM region preceding the
QM region.
host_mm_region_II_pdb : str, optional
PDB file of the receptor in the MM region following the
QM region.
num_residues : int, optional
Number of residues required in the QM region of the receptor.
"""
def __init__(
self,
init_pdb,
distance,
num_residues,
guest_resname,
cleaned_pdb="system.pdb",
guest_init_pdb="guest_init.pdb",
host_pdb="host.pdb",
guest_pdb="guest_init_II.pdb",
guest_xyz="guest_coord.txt",
residue_list="residue_list.txt",
host_qm_atoms="host_qm.txt",
host_mm_atoms="host_mm.txt",
host_qm_pdb="host_qm.pdb",
host_mm_pdb="host_mm.pdb",
qm_pdb="qm.pdb",
mm_pdb="mm.pdb",
host_mm_region_I_atoms="host_mm_region_I.txt",
host_mm_region_II_atoms="host_mm_region_II.txt",
host_mm_region_I_pdb="host_mm_region_I.pdb",
host_mm_region_II_pdb="host_mm_region_II.pdb",
):
self.init_pdb = init_pdb
self.distance = distance
self.num_residues = num_residues
self.guest_resname = guest_resname
self.cleaned_pdb = cleaned_pdb
self.guest_init_pdb = guest_init_pdb
self.host_pdb = host_pdb
self.guest_pdb = guest_pdb
self.guest_xyz = guest_xyz
self.residue_list = residue_list
self.host_qm_atoms = host_qm_atoms
self.host_mm_atoms = host_mm_atoms
self.host_qm_pdb = host_qm_pdb
self.host_mm_pdb = host_mm_pdb
self.qm_pdb = qm_pdb
self.mm_pdb = mm_pdb
self.host_mm_region_I_atoms = host_mm_region_I_atoms
self.host_mm_region_II_atoms = host_mm_region_II_atoms
self.host_mm_region_I_pdb = host_mm_region_I_pdb
self.host_mm_region_II_pdb = host_mm_region_II_pdb
def clean_up(self):
"""
Reads the given PDB file, removes all entities except the
receptor and ligand and saves a new pdb file.
"""
ions = [
"Na+",
"Cs+",
"K+",
"Li+",
"Rb+",
"Cl-",
"Br-",
"F-",
"I-",
"Ca2",
]
intermediate_file_1 = self.cleaned_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.cleaned_pdb[:-4] + "_intermediate_2.pdb"
command = (
"pdb4amber -i "
+ self.init_pdb
+ " -o "
+ intermediate_file_1
+ " --noter --dry"
)
os.system(command)
to_delete = (
intermediate_file_1[:-4] + "_nonprot.pdb",
intermediate_file_1[:-4] + "_renum.txt",
intermediate_file_1[:-4] + "_sslink",
intermediate_file_1[:-4] + "_water.pdb",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_1) as f1, open(
intermediate_file_2, "w") as f2:
for line in f1:
if not any(ion in line for ion in ions):
f2.write(line)
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.cleaned_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def create_host_guest(self):
"""
Saves separate receptor and ligand PDB files.
"""
with open(self.cleaned_pdb) as f1, open(self.host_pdb, "w") as f2:
for line in f1:
if not self.guest_resname in line and not "CRYST1" in line:
f2.write(line)
with open(self.cleaned_pdb) as f1, open(
self.guest_init_pdb, "w"
) as f2:
for line in f1:
if self.guest_resname in line or "END" in line:
f2.write(line)
def realign_guest(self):
"""
Saves a ligand PDB file with atom numbers beginning from 1.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_init_pdb)
to_subtract = min(ppdb.df["ATOM"]["atom_number"]) - 1
ppdb.df["ATOM"]["atom_number"] = (
ppdb.df["ATOM"]["atom_number"] - to_subtract
)
intermediate_file_1 = self.guest_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.guest_pdb[:-4] + "_intermediate_2.pdb"
ppdb.to_pdb(path=intermediate_file_1)
command = (
"pdb4amber -i "
+ intermediate_file_1
+ " -o "
+ intermediate_file_2
)
os.system(command)
to_delete = (
intermediate_file_2[:-4] + "_nonprot.pdb",
intermediate_file_2[:-4] + "_renum.txt",
intermediate_file_2[:-4] + "_sslink",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.guest_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def get_guest_coord(self):
"""
Saves a text file of the XYZ coordinates of the ligand.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
xyz = ppdb.df["ATOM"][["x_coord", "y_coord", "z_coord"]]
xyz_to_list = xyz.values.tolist()
np.savetxt(self.guest_xyz, xyz_to_list)
def get_qm_resids(self):
"""
Saves a text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
"""
guest_coord_list = np.loadtxt(self.guest_xyz)
host_atom_list = []
for i in range(len(guest_coord_list)):
reference_point = guest_coord_list[i]
# TODO: move reads outside of loop
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
distances = ppdb.distance(xyz=reference_point, records=("ATOM"))
all_within_distance = ppdb.df["ATOM"][
distances < float(self.distance)
]
host_df = all_within_distance["atom_number"]
host_list = host_df.values.tolist()
host_atom_list.append(host_list)
host_atom_list = list(itertools.chain(*host_atom_list))
host_atom_list = set(host_atom_list)
host_atom_list = list(host_atom_list)
host_atom_list.sort()
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
index_list = []
for i in host_atom_list:
indices = np.where(df["atom_number"] == i)
indices = list(indices)[0]
indices = list(indices)
index_list.append(indices)
index_list = list(itertools.chain.from_iterable(index_list))
df1 = df.iloc[
index_list,
]
# TODO: make it write list of integers
resid_num = list(df1.residue_number.unique())
np.savetxt(self.residue_list, resid_num, fmt="%i")
def get_host_qm_mm_atoms(self):
"""
Saves a text file of the atom numbers of the receptors in the QM
region and MM region separately.
"""
resid_num = np.loadtxt(self.residue_list)
# approximated_res_list = [int(i) for i in resid_num]
approximated_res_list = []
        # Select a window of num_residues consecutive residues centred on the
        # median residue number of the ligand-proximal residues.
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
host_index_nested_list = []
for i in approximated_res_list:
indices = np.where(df["residue_number"] == i)
            # np.where returns a tuple of arrays, so the first element has to
            # be extracted before converting the indices to a list.
indices = list(indices)[0]
indices = list(indices)
host_index_nested_list.append(indices)
host_index_list = list(
itertools.chain.from_iterable(host_index_nested_list)
)
df_atom = df.iloc[host_index_list]
df_atom_number = df_atom["atom_number"]
host_atom_list = df_atom_number.values.tolist()
selected_atoms = []
selected_atoms.extend(host_atom_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
non_selected_atoms = list(set(len_atoms).difference(selected_atoms))
assert len(non_selected_atoms) + len(selected_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_qm_atoms, selected_atoms, fmt="%i")
np.savetxt(self.host_mm_atoms, non_selected_atoms, fmt="%i")
def save_host_pdbs(self):
"""
Saves a PDB file for the receptor's QM region and MM
region separately.
"""
selected_atoms = np.loadtxt(self.host_qm_atoms)
# TODO: not necessary if savetxt writes in integers
selected_atoms = [int(i) for i in selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_pdb, records=None, gz=False, append_newline=True,
)
non_selected_atoms = np.loadtxt(self.host_mm_atoms)
non_selected_atoms = [int(i) for i in non_selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in non_selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_qm_pdb, records=None, gz=False, append_newline=True,
)
def get_host_mm_region_atoms(self):
"""
Saves a text file for the atoms of the receptor's MM region
preceding the QM region and saves another text file for the
        atoms of the receptor's MM region following the QM region.
"""
resid_num = np.loadtxt(self.residue_list)
approximated_res_list = []
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
# print(approximated_res_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["residue_number"]]
res_list = list(set(df["residue_number"].to_list()))
res_mm_list = list(set(res_list).difference(approximated_res_list))
# print(res_mm_list)
res_mm_region_I_list = []
# TODO: This can probably be made into a single loop by comparing i
# to the maximum value within approximated_res_list
for i in res_mm_list:
for j in approximated_res_list:
if i < j:
res_mm_region_I_list.append(i)
res_mm_region_I_list = list(set(res_mm_region_I_list))
res_mm_region_II_list = list(
set(res_mm_list).difference(res_mm_region_I_list)
)
# print(res_mm_region_II_list)
ppdb.read_pdb(self.host_mm_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
mm_region_I_index_nested_list = []
for i in res_mm_region_I_list:
indices = np.where(df["residue_number"] == i)
            # As above: np.where returns a tuple of arrays.
indices = list(indices)[0]
indices = list(indices)
mm_region_I_index_nested_list.append(indices)
mm_region_I_index_list = list(
itertools.chain.from_iterable(mm_region_I_index_nested_list)
)
df_atom = df.iloc[mm_region_I_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_I_atom_list = df_atom_number.values.tolist()
mm_region_I_atoms = []
mm_region_I_atoms.extend(mm_region_I_atom_list)
mm_region_II_index_nested_list = []
for i in res_mm_region_II_list:
indices = np.where(df["residue_number"] == i)
            # As above: np.where returns a tuple of arrays.
indices = list(indices)[0]
indices = list(indices)
mm_region_II_index_nested_list.append(indices)
mm_region_II_index_list = list(
itertools.chain.from_iterable(mm_region_II_index_nested_list)
)
df_atom = df.iloc[mm_region_II_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_II_atom_list = df_atom_number.values.tolist()
mm_region_II_atoms = []
mm_region_II_atoms.extend(mm_region_II_atom_list)
ppdb.read_pdb(self.host_mm_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
assert len(mm_region_I_atoms) + len(mm_region_II_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_mm_region_I_atoms, mm_region_I_atoms, fmt="%i")
np.savetxt(self.host_mm_region_II_atoms, mm_region_II_atoms, fmt="%i")
def save_host_mm_regions_pdbs(self):
"""
Saves a PDB file for the receptor's MM region preceding
the QM region and saves another PDB file for the receptor's
        MM region following the QM region.
"""
mm_region_I_atoms = np.loadtxt(self.host_mm_region_I_atoms)
mm_region_I_atoms = [int(i) for i in mm_region_I_atoms]
mm_region_II_atoms = np.loadtxt(self.host_mm_region_II_atoms)
mm_region_II_atoms = [int(i) for i in mm_region_II_atoms]
# NOTE: this is a slightly confusing way to define the atoms to
# write to a PDB - the members that are *not* in a section, rather
# than the members that are.
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_II_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_I_pdb,
records=None,
gz=False,
append_newline=True,
)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_I_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_II_pdb,
records=None,
gz=False,
append_newline=True,
)
def get_qm_mm_regions(self):
"""
Saves separate PDB files for the QM and MM regions.
        The QM region comprises the QM region of the receptor
        and the entire ligand, while the MM region comprises
        the remaining (non-QM) region of the receptor.
"""
with open(self.host_qm_pdb) as f1, open(self.qm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
with open(self.guest_pdb) as f1, open(self.qm_pdb, "a") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
with open(self.host_mm_pdb) as f1, open(self.mm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
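# Illustrative sketch (not part of the original module): one plausible call
# order for PrepareQMMM, following the order in which its methods are defined.
# "complex_solvated.pdb" and the residue name "LIG" are hypothetical
# placeholders, and pdb4amber must be available on PATH.
def _example_prepare_qmmm():
    prep = PrepareQMMM(
        init_pdb="complex_solvated.pdb",
        distance=4.0,
        num_residues=10,
        guest_resname="LIG",
    )
    prep.clean_up()
    prep.create_host_guest()
    prep.realign_guest()
    prep.get_guest_coord()
    prep.get_qm_resids()
    prep.get_host_qm_mm_atoms()
    prep.save_host_pdbs()
    prep.get_host_mm_region_atoms()
    prep.save_host_mm_regions_pdbs()
    prep.get_qm_mm_regions()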
class PrepareGaussianGuest:
"""
A class used to prepare the QM engine input file (Gaussian)
for the ligand and run QM calculations with appropriate
keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. Output files (.log, .chk, and .fchk)
    will then be used to extract the ligand's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the ligand.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="guest.out",
fchk_out_file="guest_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the ligand.
"""
command_line_1 = "%Chk = " + self.guest_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.guest_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
        df_1 = ppdb.df["ATOM"]["element_symbol"].rename("atom")
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.guest_pdb[:-4] + ".com", "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand locally.
"""
execute_command = (
"g16"
+ " < "
+ self.guest_pdb[:-4]
+ ".com"
+ " > "
+ self.guest_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.guest_pdb[:-4]
+ ".chk"
+ " "
+ self.guest_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
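# Illustrative sketch (not part of the original module): driving the ligand QM
# step end to end. This assumes the Gaussian "g16" and "formchk" executables
# are on PATH and that guest_init_II.pdb was produced by PrepareQMMM above.
def _example_prepare_gaussian_guest():
    guest_qm = PrepareGaussianGuest(
        charge=0, multiplicity=1, guest_pdb="guest_init_II.pdb"
    )
    guest_qm.write_input()   # writes guest_init_II.com
    guest_qm.run_gaussian()  # long-running QM optimisation + frequency job
    guest_qm.get_fchk()      # formatted checkpoint for parameter extraction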
class PrepareGaussianHostGuest:
"""
A class used to prepare the QM engine input file (Gaussian) for
the receptor - ligand complex and run the QM calculations with
the appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine for the receptor - ligand complex. It then runs
    a QM calculation with the given basis set and functional. The checkpoint
    file is then converted to a formatted checkpoint file. Output files
    (.log, .chk, and .fchk) will then be used to extract charges for the
ligand and the receptor.
...
Attributes
----------
charge : int, optional
Total charge of the receptor - ligand complex.
multiplicity : int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_system_out_file : str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_system_out_file : str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
host_guest_input : str, optional
Gaussian input file (.com extension) for the receptor - ligand
QM region.
qm_guest_charge_parameter_file : str, optional
        File containing the charges of ligand atoms and their corresponding
        atoms. The charges obtained are the polarised charges due to the
        surrounding receptor's region.
qm_host_charge_parameter_file : str, optional
File containing the charges of the QM region of the receptor.
qm_guest_atom_charge_parameter_file : str, optional
        File containing the charges of ligand atoms. The charges obtained
        are the polarised charges due to the surrounding receptor's region.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="",
frequency="",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6) SCRF=PCM",
gauss_system_out_file="system_qm.out",
fchk_system_out_file="system_qm_fchk.out",
host_guest_input="host_guest.com",
qm_guest_charge_parameter_file="guest_qm_surround_charges.txt",
qm_host_charge_parameter_file="host_qm_surround_charges.txt",
qm_guest_atom_charge_parameter_file="guest_qm_atom_surround_charges.txt",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
self.gauss_system_out_file = gauss_system_out_file
self.fchk_system_out_file = fchk_system_out_file
self.host_guest_input = host_guest_input
self.qm_guest_charge_parameter_file = qm_guest_charge_parameter_file
self.qm_host_charge_parameter_file = qm_host_charge_parameter_file
self.qm_guest_atom_charge_parameter_file = (
qm_guest_atom_charge_parameter_file
)
def write_input(self):
"""
Writes a Gaussian input file for the receptor - ligand QM region.
"""
command_line_1 = "%Chk = " + self.host_guest_input[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = "Gaussian Input File"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
        df_1 = ppdb.df["ATOM"]["element_symbol"].rename("atom")
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_1 = | pd.concat([df_1, df_2, df_3], axis=1) | pandas.concat |
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.toolkits import power_curve
from operational_analysis.toolkits.power_curve.parametric_forms import *
noise = 0.1
class TestPowerCurveFunctions(unittest.TestCase):
def setUp(self):
np.random.seed(42)
params = [1300, -7, 11, 2, 0.5]
self.x = pd.Series(np.random.random(100) * 30)
self.y = pd.Series(logistic5param(self.x, *params) + np.random.random(100) * noise)
def test_IEC(self):
# Create test data using logistic5param form
curve = power_curve.IEC(self.x, self.y)
y_pred = curve(self.x)
# Does the IEC power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit.")
def test_logistic_5_param(self):
# Create test data using logistic5param form
curve = power_curve.logistic_5_parametric(self.x, self.y)
y_pred = curve(self.x)
# Does the logistic-5 power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit.")
def test_gam(self):
# Create test data using logistic5param form
curve = power_curve.gam(windspeed_column = self.x, power_column = self.y, n_splines = 20)
y_pred = curve(self.x)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=0.05, atol = 20, err_msg="Power curve did not properly fit.")
def test_3paramgam(self):
# Create test data using logistic5param form
winddir = np.random.random(100)
airdens = np.random.random(100)
curve = power_curve.gam_3param(windspeed_column = self.x, winddir_column=winddir, airdens_column=airdens, power_column = self.y, n_splines = 20)
y_pred = curve(self.x, winddir, airdens)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=0.05, atol = 20, err_msg="Power curve did not properly fit.")
def tearDown(self):
pass
class TestParametricForms(unittest.TestCase):
def setUp(self):
pass
def test_logistic5parameter(self):
y_pred = logistic5param(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
y_pred = logistic5param(np.array([1, 2, 3]), *[1300., -7., 11., 2., 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle integer inputs properly.")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, 7, 11, 2, 0.5)
y = np.array([ 1300.0 , 1300.0 ])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b>0).")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, -7, 11, 2, 0.5)
y = np.array([ 2.0 , 2.0 ])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b<0).")
def test_logistic5parameter_capped(self):
# Numpy array + Lower Bound
y_pred = logistic5param_capped(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=20.)
y = np.array([5., 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Numpy array + Upper and Lower Bound
y_pred = logistic5param_capped(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=10.)
y = np.array([5., 5.32662505, 10.])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Pandas Series + Upper and Lower Bound
y_pred = logistic5param_capped( | pd.Series([1., 2., 3.]) | pandas.Series |
from argparse import ArgumentParser
from pathlib import Path
from typing import Tuple
import pandas as pd
from sklearn.model_selection import train_test_split
def split_metadata(original_metadata: pd.DataFrame, train_fraction: float, random_state: int) -> Tuple[pd.DataFrame, pd.DataFrame]:
image_ids = | pd.unique(original_metadata['image_id']) | pandas.unique |
from warnings import catch_warnings, simplefilter
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import (
DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna)
from pandas.util import testing as tm
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexSetItem(object):
def test_setitem_multiindex(self):
with catch_warnings(record=True):
for index_fn in ('ix', 'loc'):
def assert_equal(a, b):
assert a == b
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = MultiIndex.from_product([np.arange(0, 100),
np.arange(0, 80)],
names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=0,
compare_fn=assert_equal)
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=1,
compare_fn=assert_equal)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=2,
compare_fn=assert_equal)
# gh-7218: assigning with 0-dim arrays
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df,
indexers=((t, n), 'X'),
value=np.array(3),
compare_fn=assert_equal,
expected=3, )
# GH5206
df = DataFrame(np.arange(25).reshape(5, 5),
columns='A,B,C,D,E'.split(','), dtype=float)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
with catch_warnings(record=True):
df.ix[row_selection, col_selection] = df['F']
output = DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
with catch_warnings(record=True):
tm.assert_frame_equal(df.ix[row_selection, col_selection],
output)
check(target=df,
indexers=(row_selection, col_selection),
value=df['F'],
compare_fn=tm.assert_frame_equal,
expected=output, )
# GH11372
idx = MultiIndex.from_product([
['A', 'B', 'C'],
date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = MultiIndex.from_product([
['foo', 'bar'],
date_range('2016-01-01', '2016-02-01', freq='MS')])
df = DataFrame(np.random.random((12, 4)),
index=idx, columns=cols)
subidx = MultiIndex.from_tuples(
[('A', | Timestamp('2015-01-01') | pandas.Timestamp |
import os
import glob
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
# Data Set
DATASET_FOLDER_PATH = "./"
INPUT_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "input")
TRAIN_FILE_PATH = os.path.join(INPUT_FOLDER_PATH, "train.csv")
TEST_FILE_PATH = os.path.join(INPUT_FOLDER_PATH, "test.csv")
SUBMISSION_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "submission")
ID_COLUMN_NAME = "id"
LABEL_COLUMN_NAME = "loss"
# Model Structure
BLOCK_NUM = 3
DENSE_DIM = 512
DROPOUT_RATIO = 0.5
# Training Procedure
CROSS_VALIDATION_NUM = 10
MAXIMUM_EPOCH_NUM = 1000000
EARLYSTOPPING_PATIENCE = 20
TRAIN_BATCH_SIZE = 32
TEST_BATCH_SIZE = 1024
def load_data():
# Read file content
train_file_content = | pd.read_csv(TRAIN_FILE_PATH) | pandas.read_csv |
# -*- coding: utf-8 -*-
import os
import logging
import tempfile
import uuid
import shutil
import numpy as np
import pandas as pd
from rastertodataframe import util, tiling
log = logging.getLogger(__name__)
def raster_to_dataframe(raster_path, vector_path=None):
"""Convert a raster to a Pandas DataFrame.
Parameters
----------
raster_path : str
Path to raster file.
vector_path : str
Optional path to vector file. If given, raster pixels will be extracted
from features in the vector. If None, all raster pixels are converted
to a DataFrame.
Returns
-------
pandas.core.frame.DataFrame
"""
# Placeholders for possible temporary files.
temp_dir = vector_mask_fname = None
# Get raster band names.
ras = util.open_raster(raster_path)
raster_band_names = util.get_raster_band_names(ras)
# Create a mask from the pixels touched by the vector.
if vector_path is not None:
# Create a temporary directory for files.
temp_dir = tempfile.mkdtemp()
vec_with_fid = os.path.join(temp_dir, '{}'.format(uuid.uuid1()))
# Add a dummy feature ID column to the vector.
# This is not always present in OGR features.
vec_gdf = util.open_vector(vector_path, with_geopandas=True)
mask_values = list(range(1, len(vec_gdf) + 1))
vec_gdf['__fid__'] = | pd.Series(mask_values) | pandas.Series |
# heartparser.py
# Author: <NAME>
#
# This script parses the Apple Health export xml file for Heart Rate and
# Blood Pressure data and produces graphs of the data for given date ranges.
import numpy as np
from datetime import date, datetime, timedelta as td
from matplotlib import pyplot, dates as mdates
from matplotlib.ticker import FormatStrFormatter
from openpyxl import load_workbook
from pandas import DataFrame, ExcelWriter, Index
from seaborn import lmplot, regplot
from xml.dom import minidom
def main():
dates = ["2016-12-11", "2017-01-05"]
datelist = makedatelist(dates)
xmldoc = minidom.parse('export.xml')
recordlist = xmldoc.getElementsByTagName('Record')
plotheartrate(dates, datelist, recordlist)
plotbp(dates, datelist, recordlist)
def plotheartrate(dates, datelist, recordlist):
# This module parses the heart rate data from the xml file and calls
# the plotting function.
category = "HKQuantityTypeIdentifierHeartRate"
df_hr = parse(category, recordlist)
df_hr.columns = ['Time', 'Heart Rate (bpm)']
df_weekhr = weeklyhr(datelist, df_hr)
for i in range(52):
maxnum = df_weekhr[df_weekhr['Week'] == i].max()
print(maxnum)
makehrplot(dates, df_weekhr)
def weeklyhr(datelist, df_hr):
# This module filters through df_hr and returns a dataframe containing
# weekly heart rate values.
list_weekhr = []
dict_weekday = {}
for di in range(len(datelist)):
year = int(datelist[di][:4])
month = int(datelist[di][5:7])
day = int(datelist[di][8:10])
weekj = date(year, month, day).isocalendar()[1]
if weekj not in dict_weekday:
dict_weekday[weekj] = mdates.date2num(datetime(year, month, day))
try:
i = df_hr.index.get_loc(datelist[di])
di_hr = df_hr.get_value(i, 1, takeable=True)
list_weekhr.append({
'Date': mdates.date2num(datetime(year, month, day)),
'PlotDate': dict_weekday[weekj],
'Week': weekj,
'Heart Rate (bpm)': float(di_hr)
})
except:
pass
df_weekhr = | DataFrame(list_weekhr) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import numpy as np
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in | compat.iteritems(test_data) | pandas.compat.iteritems |
from scipy.spatial.distance import cosine
from scipy.stats import pearsonr
import numpy as np
import pandas as pd
def metric_for_similarity_f(b, metric="euclidean"):
if metric == "euclidean":
return lambda a:np.sqrt(np.sum((a-b)**2))#L2
elif metric == "cosine":
return lambda a:cosine(a,b)#cosine distance
elif metric == "-dot":
return lambda a:- np.dot(a,b)
elif metric == "1-pearson":
return lambda a:1.0 - pearsonr(a,b)[0]
else:
raise RuntimeError("Invalid metric!")
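# Illustrative sketch (not part of the original file): the helper returns a
# one-argument distance function "curried" on the reference vector b, so it
# can be applied row by row; the vectors below are made-up examples.
def _example_metric_for_similarity_f():
    b = np.array([1.0, 0.0, 2.0])
    a = np.array([0.5, 0.5, 1.5])
    cosine_dist = metric_for_similarity_f(b, metric="cosine")(a)
    euclidean_dist = metric_for_similarity_f(b, metric="euclidean")(a)
    return cosine_dist, euclidean_dist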
#def display(simple_array, num):
# k = len(simple_array);
# plt.figure(num);
# plt.plot(range(k), simple_array);
def get_topk_similar(reference_id, data_part, metric, gen_id_to_account, k=10, verbose=True):
snapshot_id, feats, relevance_labels = data_part
snapshot_df = feats.set_index(0)
row_count = snapshot_df.shape[0]
distances = []
#compute each distance
rep_of_reference = np.array(snapshot_df.loc[reference_id])
indices = snapshot_df.index
metric_f = metric_for_similarity_f(rep_of_reference, metric)
for idx, row in enumerate(snapshot_df.values):
if indices[idx] != reference_id:
distance = metric_f(row)
distances.append( (indices[idx], distance) )
rdf = | pd.DataFrame(distances, columns=["id","dist"]) | pandas.DataFrame |
# The test is referenced from https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html
import time
import hdbscan
import warnings
import sklearn.cluster
import scipy.cluster
import sklearn.datasets
import numpy as np
import pandas as pd
import seaborn as sns
from numpy.linalg import norm
from classix.aggregation_test import aggregate
from classix import CLASSIX
from quickshift.QuickshiftPP import *
from sklearn import metrics
import matplotlib.pyplot as plt
from threadpoolctl import threadpool_limits
np.random.seed(0)
def benchmark_algorithm_tdim(dataset_dimensions, cluster_function, function_args, function_kwds,
dataset_size=10000, dataset_n_clusters=10, max_time=45, sample_size=10, algorithm=None):
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result_time = np.nan * np.ones((len(dataset_dimensions), sample_size))
result_ar = np.nan * np.ones((len(dataset_dimensions), sample_size))
result_ami = np.nan * np.ones((len(dataset_dimensions), sample_size))
for index, dimension in enumerate(dataset_dimensions):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
            # fix cluster_std so that clustering relies less on parameter tuning.
data, labels = sklearn.datasets.make_blobs(n_samples=dataset_size,
n_features=dimension,
centers=dataset_n_clusters,
cluster_std=1)
# Start the clustering with a timer
start_time = time.time()
cluster_function.fit(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
if algorithm == "Quickshift++":
preds = cluster_function.memberships
else:
preds = cluster_function.labels_
# print("labels num:", len(np.unique(preds)))
ar = metrics.adjusted_rand_score(labels, preds)
ami = metrics.adjusted_mutual_info_score(labels, preds)
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
            if time_taken > max_time:  # Luckily, it won't happen in our experiment.
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
return pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
else:
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
def benchmark_algorithm_tsize(dataset_sizes, cluster_function, function_args, function_kwds,
dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=10, algorithm=None):
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result_time = np.nan * np.ones((len(dataset_sizes), sample_size))
result_ar = np.nan * np.ones((len(dataset_sizes), sample_size))
result_ami = np.nan * np.ones((len(dataset_sizes), sample_size))
for index, size in enumerate(dataset_sizes):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
            # use a fixed cluster_std so that the clustering results depend less on parameter tuning.
data, labels = sklearn.datasets.make_blobs(n_samples=size,
n_features=dataset_dimension,
centers=dataset_n_clusters,
cluster_std=1)
# Start the clustering with a timer
start_time = time.time()
cluster_function.fit(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
if algorithm == "Quickshift++":
preds = cluster_function.memberships
else:
preds = cluster_function.labels_
# print("labels num:", len(np.unique(preds)))
ar = metrics.adjusted_rand_score(labels, preds)
ami = metrics.adjusted_mutual_info_score(labels, preds)
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
            if time_taken > max_time: # Luckily, this does not happen in our experiments.
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
else:
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
def rn_gaussian_dim():
warnings.filterwarnings("ignore")
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
dataset_dimensions = np.hstack([np.arange(1, 11) * 10])
np.random.seed(0)
with threadpool_limits(limits=1, user_api='blas'):
k_means = sklearn.cluster.KMeans(n_clusters=10, init='k-means++')
k_means_time, k_means_ar, k_means_ami = benchmark_algorithm_tdim(dataset_dimensions, k_means, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=10, min_samples=1, n_jobs=1, algorithm='ball_tree')
dbscan_btree_time, dbscan_btree_ar, dbscan_btree_ami = benchmark_algorithm_tdim(dataset_dimensions, dbscan, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=10, min_samples=1, n_jobs=1, algorithm='kd_tree')
dbscan_kdtree_time, dbscan_kdtree_ar, dbscan_kdtree_ami = benchmark_algorithm_tdim(dataset_dimensions, dbscan, (), {})
hdbscan_ = hdbscan.HDBSCAN(algorithm='best', core_dist_n_jobs=1)
hdbscan_time, hdbscan_ar, hdbscan_ami = benchmark_algorithm_tdim(dataset_dimensions, hdbscan_, (), {})
classix = CLASSIX(sorting='pca', radius=0.3, minPts=5, group_merging='distance', verbose=0)
classix_time, classix_ar, classix_ami = benchmark_algorithm_tdim(dataset_dimensions, classix, (), {})
quicks = QuickshiftPP(k=20, beta=0.7)
quicks_time, quicks_ar, quicks_ami = benchmark_algorithm_tdim(dataset_dimensions, quicks, (), {}, algorithm='Quickshift++')
k_means_time.to_csv("results/exp1/gd_kmeans_time.csv",index=False)
dbscan_kdtree_time.to_csv("results/exp1/gd_dbscan_kdtree_time.csv",index=False)
dbscan_btree_time.to_csv("results/exp1/gd_dbscan_btree_time.csv",index=False)
hdbscan_time.to_csv("results/exp1/gd_hdbscan_time.csv",index=False)
classix_time.to_csv("results/exp1/gd_classix_time.csv",index=False)
quicks_time.to_csv("results/exp1/gd_quicks_time.csv",index=False)
k_means_ar.to_csv("results/exp1/gd_kmeans_ar.csv",index=False)
dbscan_kdtree_ar.to_csv("results/exp1/gd_dbscan_kdtree_ar.csv",index=False)
dbscan_btree_ar.to_csv("results/exp1/gd_dbscan_btree_ar.csv",index=False)
hdbscan_ar.to_csv("results/exp1/gd_hdbscan_ar.csv",index=False)
classix_ar.to_csv("results/exp1/gd_classix_ar.csv",index=False)
quicks_ar.to_csv("results/exp1/gd_quicks_ar.csv",index=False)
def rn_gaussian_size():
warnings.filterwarnings("ignore")
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
np.random.seed(0)
dataset_sizes = np.hstack([np.arange(1, 11) * 5000])
np.random.seed(0)
with threadpool_limits(limits=1, user_api='blas'):
k_means = sklearn.cluster.KMeans(n_clusters=10, init='k-means++')
k_means_time, k_means_ar, k_means_ami = benchmark_algorithm_tsize(dataset_sizes, k_means, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=3, min_samples=1, n_jobs=1, algorithm='ball_tree')
dbscan_btree_time, dbscan_btree_ar, dbscan_btree_ami = benchmark_algorithm_tsize(dataset_sizes, dbscan, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=3, min_samples=1, n_jobs=1, algorithm='kd_tree')
dbscan_kdtree_time, dbscan_kdtree_ar, dbscan_kdtree_ami = benchmark_algorithm_tsize(dataset_sizes, dbscan, (), {})
hdbscan_ = hdbscan.HDBSCAN(algorithm='best', core_dist_n_jobs=1)
hdbscan_time, hdbscan_ar, hdbscan_ami = benchmark_algorithm_tsize(dataset_sizes, hdbscan_, (), {})
classix = CLASSIX(sorting='pca', radius=0.3, minPts=5, group_merging='distance', verbose=0)
classix_time, classix_ar, classix_ami = benchmark_algorithm_tsize(dataset_sizes, classix, (), {})
quicks = QuickshiftPP(k=20, beta=0.7)
quicks_time, quicks_ar, quicks_ami = benchmark_algorithm_tsize(dataset_sizes, quicks, (), {}, algorithm='Quickshift++')
k_means_time.to_csv("results/exp1/gs_kmeans_time.csv",index=False)
dbscan_kdtree_time.to_csv("results/exp1/gs_dbscan_kdtree_time.csv",index=False)
dbscan_btree_time.to_csv("results/exp1/gs_dbscan_btree_time.csv",index=False)
hdbscan_time.to_csv("results/exp1/gs_hdbscan_time.csv",index=False)
classix_time.to_csv("results/exp1/gs_classix_time.csv",index=False)
quicks_time.to_csv("results/exp1/gs_quicks_time.csv",index=False)
k_means_ar.to_csv("results/exp1/gs_kmeans_ar.csv",index=False)
dbscan_kdtree_ar.to_csv("results/exp1/gs_dbscan_kdtree_ar.csv",index=False)
dbscan_btree_ar.to_csv("results/exp1/gs_dbscan_btree_ar.csv",index=False)
hdbscan_ar.to_csv("results/exp1/gs_hdbscan_ar.csv",index=False)
classix_ar.to_csv("results/exp1/gs_classix_ar.csv",index=False)
quicks_ar.to_csv("results/exp1/gs_quicks_ar.csv",index=False)
def run_gassian_plot():
# -------------------------------dim
k_means_time = pd.read_csv("results/exp1/gd_kmeans_time.csv")
dbscan_kdtree_time = pd.read_csv("results/exp1/gd_dbscan_kdtree_time.csv")
dbscan_btree_time = pd.read_csv("results/exp1/gd_dbscan_btree_time.csv")
hdbscan_time = pd.read_csv("results/exp1/gd_hdbscan_time.csv")
classix_time = pd.read_csv("results/exp1/gd_classix_time.csv")
quicks_time = pd.read_csv("results/exp1/gd_quicks_time.csv")
k_means_ar = pd.read_csv("results/exp1/gd_kmeans_ar.csv")
dbscan_kdtree_ar = pd.read_csv("results/exp1/gd_dbscan_kdtree_ar.csv")
dbscan_btree_ar = pd.read_csv("results/exp1/gd_dbscan_btree_ar.csv")
hdbscan_ar = pd.read_csv("results/exp1/gd_hdbscan_ar.csv")
classix_ar = pd.read_csv("results/exp1/gd_classix_ar.csv")
quicks_ar = pd.read_csv("results/exp1/gd_quicks_ar.csv")
plt.figure(figsize=(12,8))
plt.style.use('bmh')
sns.set(font_scale=1.8)
sns.set_style("whitegrid")
plt.rcParams['axes.facecolor'] = 'white'
# plt.rc('font', family='serif')
ax = sns.lineplot(data=k_means_time, x="x", y="y", marker='v', markersize=13, label='k-means++', linestyle="-", linewidth=6)
ax = sns.lineplot(data=dbscan_kdtree_time, x="x", y="y", marker='s', markersize=13, label='DBSCAN (kd-tree)', linestyle="--", linewidth=6)
ax = sns.lineplot(data=dbscan_btree_time, x="x", y="y", marker='o', markersize=13, label='DBSCAN (ball tree)', linestyle=":", linewidth=6)
ax = sns.lineplot(data=hdbscan_time, x="x", y="y", marker='<', markersize=13, label='HDBSCAN', linestyle="-", linewidth=6)
ax = sns.lineplot(data=classix_time, x="x", y="y", marker='*', markersize=17, label='CLASSIX', linestyle="--", linewidth=6)
ax = sns.lineplot(data=quicks_time, x="x", y="y", marker='p', markersize=17, label='Quickshift++', linestyle=(0, (3, 1, 1, 1, 1, 1)), linewidth=6)
ax.set(xlabel='dimensions', ylabel='time (s)', title="Gaussian blobs (n=10000)")
plt.tick_params(axis='both', labelsize=22)
plt.savefig('results/exp1/gaussian_dim_time.pdf', bbox_inches='tight')
plt.figure(figsize=(12,8))
plt.style.use('bmh')
sns.set(font_scale=1.8)
sns.set_style("whitegrid")
plt.rcParams['axes.facecolor'] = 'white'
# plt.rc('font', family='serif')
ax = sns.lineplot(data=k_means_ar, x="x", y="y", marker='v', markersize=13, label='k-means++', linestyle="-", linewidth=6)
ax = sns.lineplot(data=dbscan_kdtree_ar, x="x", y="y", marker='s', markersize=13, label='DBSCAN (kd-tree)', linestyle="--", linewidth=6)
ax = sns.lineplot(data=dbscan_btree_ar, x="x", y="y", marker='o', markersize=13, label='DBSCAN (ball tree)', linestyle=":", linewidth=6)
ax = sns.lineplot(data=hdbscan_ar, x="x", y="y", marker='<', markersize=13, label='HDBSCAN', linestyle="-", linewidth=6)
ax = sns.lineplot(data=classix_ar, x="x", y="y", marker='*', markersize=17, label='CLASSIX', linestyle="--", linewidth=6)
ax = sns.lineplot(data=quicks_ar, x="x", y="y", marker='p', markersize=17, label='Quickshift++', linestyle=(0, (3, 1, 1, 1, 1, 1)), linewidth=6)
ax.set(xlabel='dimensions', ylabel='adjusted Rand index', title="Gaussian blobs (n=10000)")
ax.set(ylim=(-.1, 1.1))
plt.tick_params(axis='both', labelsize=22)
plt.savefig('results/exp1/gaussian_dim_ar.pdf', bbox_inches='tight')
# -------------------------------size
k_means_time = pd.read_csv("results/exp1/gs_kmeans_time.csv")
dbscan_kdtree_time = pd.read_csv("results/exp1/gs_dbscan_kdtree_time.csv")
dbscan_btree_time = pd.read_csv("results/exp1/gs_dbscan_btree_time.csv")
hdbscan_time = pd.read_csv("results/exp1/gs_hdbscan_time.csv")
classix_time = pd.read_csv("results/exp1/gs_classix_time.csv")
quicks_time = pd.read_csv("results/exp1/gs_quicks_time.csv")
k_means_ar = pd.read_csv("results/exp1/gs_kmeans_ar.csv")
dbscan_kdtree_ar = pd.read_csv("results/exp1/gs_dbscan_kdtree_ar.csv")
dbscan_btree_ar = pd.read_csv("results/exp1/gs_dbscan_btree_ar.csv")
hdbscan_ar = | pd.read_csv("results/exp1/gs_hdbscan_ar.csv") | pandas.read_csv |
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import sklearn
#import gensim
import scipy
import numpy
import json
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import sys
import csv
import os
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import load
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import tree
from scipy.sparse import coo_matrix, hstack, vstack
from sklearn.metrics import accuracy_score, classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.neural_network import MLPClassifier
gp_df = pd.read_csv('gap_phase2_wCV.tsv', delimiter='\t')
numerical_features = gp_df.loc[:,['Pronoun-offset', 'A-offset', 'B-offset',
'offsetPronoun-A', 'offsetPronoun-B', 'pre_predict-A',
'pre_predict-B', 'pre_predict-N', 'tar_predict-A',
'tar_predict-B', 'tar_predict-N', ]]
cat_features = pd.get_dummies(gp_df.loc[:,['PronounNum', 'SentenceClass']].astype('category'),drop_first=True)
labels = gp_df.loc[:, 'PronounClass']
ids = gp_df.loc[:, 'ID']
features = pd.concat([numerical_features,cat_features], axis=1)
clfANN = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
beta_1=0.95, beta_2=0.9995, early_stopping=False,
epsilon=1e-05, hidden_layer_sizes=(100,100),
learning_rate='constant', learning_rate_init=0.015,
max_iter=3000, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=0,
shuffle=True, solver='adam', tol=0.001,
validation_fraction=0.1, verbose=False, warm_start=False)
clfANN.fit(features, labels)
results = pd.DataFrame(clfANN.predict_proba(features))
finaldf = | pd.concat([ids,results], axis=1) | pandas.concat |
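# Hedged follow-up sketch (not in the original): persist the per-ID class probabilities.
# The output filename is hypothetical; column order follows predict_proba, i.e. clfANN.classes_.
finaldf.columns = ['ID'] + [str(c) for c in clfANN.classes_]
finaldf.to_csv('gap_phase2_probabilities.csv', index=False)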
#!/usr/bin/env python
# coding: utf-8
# #### functions in this script extract keyword-related measurements (e.g. defect and infiltrate sizes) from clinical text
import pandas as pd
from nltk import tokenize
from iteration_utilities import deepflatten
import re
import statistics as stat
def hasNumbers(inputString):
'''check if string has numbers'''
return any(char.isdigit() for char in inputString)
def split_para(paragraph):
'''
Input: paragraph string
    Usage: split a paragraph into segments; the splitters are sentence boundaries,
           "with", "and", "surround", and "but"
    Output: list of split segments
'''
try:
sents = tokenize.sent_tokenize(paragraph)
except TypeError:
print('expecting a string input')
sents_new = list(map(lambda x:x.split('with '), sents))
sents_new = list(deepflatten(sents_new,types=list)) ##flatten list of lists
sents_new = list(map(lambda x:x.split(' and '), sents_new))
sents_new = list(deepflatten(sents_new,types=list))
sents_new = list(map(lambda x:x.split('surround'), sents_new))
sents_new = list(deepflatten(sents_new,types=list))
sents_new = list(map(lambda x:x.split(' but '), sents_new))
sents_new = list(deepflatten(sents_new,types=list))
return sents_new
def size_determ(rgx,keyword,sent):
'''
Input:
rgx: list of regex patterns
keyword: keyword to look for
sent: sentence
    Output: 1 if any rgx pattern matches the sentence, or the keyword appears in it together with a number; else 0
'''
if rgx!=[]:
for i in rgx:
if i.findall(sent)!=[]:
return 1
if keyword in sent and hasNumbers(sent)==True:
return 1
else:
return 0 ## no size determination
def apply_f(a,f):
'''
Input:
a: list of lists
f: function
Usage: apply function to each string element in list of lists
'''
if isinstance(a,list):
return list(map(lambda t:apply_f(t,f), a))
else:
return f(a)
def rm_post_culture(sent):
'''
Input:
sent: sentence
Usage: exclude segments about post culture/after culture
'''
if "post culture" in sent or "after culture" in sent:
return ""
else:
return sent
def match_numbers(lst1,match_or_not):
'''
Input:
lst1: list of measurements
        match_or_not: binary indicator from size_determ(); 0 means no related measurement, 1 otherwise
Output: list of valid measurements related to keywords
'''
if match_or_not!=0:
return lst1
elif lst1==['0','0']:
return lst1
else:
return []
def no_measures(rgx_lst, sent_lst,measure_lst):
'''
Input:
rgx_lst: list of regex patterns to look for (ex: no infiltrate)
        sent_lst: list of split segments
        measure_lst: list of measurements found within sent_lst
    Output: measure_lst with entries set to ['0','0'] wherever a zero-case regex matches; other entries unchanged
'''
for j in rgx_lst:
for i in range(0,len(sent_lst)):
if j.findall(sent_lst[i])!=[]:
measure_lst[i]=['0','0']
return measure_lst
def rm_specialfmt(lst):
'''
Input:
lst: list of numerical tokens matched
    Usage: delete unwanted numerical tokens such as times, dates, percentages and so on
Output: list of validated numerical measures
'''
re_time = re.compile(r'\d+\:\d+')
re_date = re.compile(r'\d+\/\d+\/\d+')
re_time1 = re.compile(r'\d+\-\d+')
re_pct = re.compile(r'\d+%')
re_number = re.compile(r'\d{3,}')
re_plus = re.compile(r'\d+\+')
if len(lst)==0:
return lst
else:
new = [x for x in lst if not re_time.search(x) \
and not re_date.search(x) and not re_pct.search(x) \
and not re_number.search(x) and not re_time1.search(x) and not re_plus.search(x)]
return new
def assign_values(dat,col_inp,col1,col2):
'''
Input:
dat: dataframe
col_inp: text column
col1: column name for left eye measurement
col2: column name for right eye measurement
    Usage: aggregate the functions above and assign the extracted measurements to the dataframe columns
    Output: a pandas DataFrame
'''
dat[col1] = pd.Series(['na']*len(dat),index=dat.index)
dat[col2] = pd.Series(['na']*len(dat),index=dat.index)
for i in dat.index:
if dat.loc[i,col_inp]==[]:
dat.loc[i,col1]='na'
dat.loc[i,col2]='na'
elif len(dat.loc[i,col_inp])==1:
new = rm_specialfmt(dat.loc[i,col_inp][0])
if len(new) >= 2:
dat.loc[i,col1]=new[0]
dat.loc[i,col2]=new[1]
elif len(new) == 1:
dat.loc[i,col1]=new[0]
dat.loc[i,col2]=new[0]
elif len(new) == 0:
dat.loc[i,col1]='na'
dat.loc[i,col2]='na'
elif dat.loc[i,col_inp]!='na' and len(dat.loc[i,col_inp]) >= 2:
measures = []
for lsts in range(0,len(dat.loc[i,col_inp])):
measures.append(rm_specialfmt(dat.loc[i,col_inp][lsts]))
measures=apply_f(measures,eval)
measures = list(filter(None, measures))
if measures == []:
dat.loc[i,col1]='na'
dat.loc[i,col2]='na'
else:
mean = list(map(stat.mean,measures))
idx = mean.index(max(mean))
if len(measures[idx])>=2:
dat.loc[i,col1]=measures[idx][0]
dat.loc[i,col2]=measures[idx][1]
elif len(measures[idx])==1:
dat.loc[i,col1]=measures[idx][0]
dat.loc[i,col2]=measures[idx][0]
return dat
def reduce(dat, inp_col):
'''
input:
dat: dataframe
inp_col: text column
    usage: keep the measurement pair with the largest mean when more than one related measurement is found
    output: a pandas DataFrame
'''
for i in dat[dat[inp_col].apply(lambda x:len(x)>=2)].index.tolist():
measures=apply_f(dat.loc[i,inp_col],eval)
measures = list(filter(None, measures))
if measures == []:
dat.loc[i,inp_col]='na'
else:
mean = list(map(stat.mean,measures))
idx = mean.index(max(mean))
dat.loc[i,inp_col] = str([measures[idx]])
return dat
## regex patterns for commonly occurring zero cases in the training set, e.g. "no defect", "healed defect"
key_wd = "<PASSWORD>"
ed_rgx = [re.compile(r'no (\w* ){0,}defect|without (\w* ){0,}defect'),
re.compile(r'epi\w+ ((?!not)\w*(%)? ){0,}intact|intact (\w* ){0,}epi\w+'),
re.compile(r'epi\w+ (defect)? ((?!nearly)(?!almost)\w* ){0,}healed|healed (epi\w+)? defect'),
re.compile(r'epithelial defect ((?!almost)(?!mostly)\w* ){0,}resolved|resolved (\w* ){0,}(epithelial )?defect'),
re.compile(r'(no|without|resolved|negative) (\w* ){0,}stain(ing)?'),
re.compile(r'epi\w+ irregularity'), re.compile(r'pinpoint (\w* ){0,}epithelial defect'),
re.compile(r'(\w*(?!no) ){0,}epithelial erosion'),re.compile(r'pinpoint (\w*(?!no) ){0,}stain'),
re.compile(r'punctate (\w* ){0,}epi\w+ defect'),re.compile(r'epithelium healed'),
re.compile(r'punctate stain'),re.compile(r'defect (\w*(?!no) ){0,}closed')]
inf_rgx = [re.compile(r'no (\w* ){0,}infiltrat\w+|without (\w*(?!defect) ){0,}infiltrat'),
re.compile(r'infiltrat\w+ ((?!nearly)(?!almost)\w* ){0,}healed|healed (\w* ){0,}infiltrat\w+'),
re.compile(r'infiltrat\w+ ((?!almost)(?!mostly)\w* ){0,}resolved|resolved (\w* ){0,}infiltrat'),
re.compile(r'punctate( \w*){0,}\/?infiltrat\w+'),re.compile(r'punctate (\w* ){0,}infiltrat\w+'),
re.compile(r'pinpoint (\w* ){0,}infiltrat\w+')]
## match numerical measurements
re_float = re.compile(r'\d*\.?\/?\:?\-?\/?\d+\/?\d*\%?')
def measure_or_not(sents,rgx,kwd):
'''
input:
sents: string of sentences
rgx: regex pattern of 0 cases
kwd: keyword you are looking for, ex: defect, infiltrate
    output: list of binary indicators, one per segment, showing whether that segment contains measurements related to the keyword
'''
sent_lst = split_para(sents)
sent_lst = apply_f(sent_lst,rm_post_culture)
measure = list(map(lambda x: size_determ(rgx,kwd,x), sent_lst))
measure = list(map(lambda x: 1 if x >=1 else 0, measure))
return measure
def measure(sents, kwd, rgx,re_float=re_float):
'''
input:
sents: string of sentences
kwd: keyword you are looking for, ex: defect, infiltrate
rgx: regex pattern of 0 cases
re_float: regex matching any numerical tokens
output: list of lists for the numerical measures found at segment level
'''
sent_lst = split_para(sents)
    sent_lst = apply_f(sent_lst,rm_post_culture) ## split segments, with post-culture text removed
measure_01 = measure_or_not(sents,rgx,kwd) ## binary indicator
size = apply_f(sent_lst, lambda t: re.findall(re_float, t)) ## all numerical tokens
size = no_measures(rgx, sent_lst, size) ## find 0 cases and assign measurement as 0
new_size=[]
for i in range(0,len(size)):
new_size.append(match_numbers(size[i], measure_01[i]))
results = list(map(lambda x:rm_specialfmt(x), new_size)) ## remove unwanted numerical tokens
results = list(filter(None,results))
## take the first 2 measures if found multiple
results = list(map(lambda x: x[:2] if len(x)>=2 else [x[0],x[0]], results))
return results
def measure_max(sents, kwd, rgx,re_float=re_float):
'''
input:
sents: string of sentences
kwd: keyword you are looking for, ex: defect, infiltrate
rgx: regex pattern of 0 cases
re_float: regex matching any numerical tokens
output: dictionary for the numerical measures related to kwd at encounter level
'''
measures = measure(sents, kwd,rgx, re_float=re_float)
if measures == None:
return {kwd: (None,None)}
measures = apply_f(measures,eval)
if len(measures)==1 and type(measures[0])==list:
return {kwd: tuple(measures[0])}
elif len(measures)>=2:
mean = list(map(stat.mean,measures))
idx = mean.index(max(mean))
return {kwd: tuple(measures[idx])}
else:
return {kwd: (None,None)}
def measure_size(sents,re_float=re_float, rgx=[ed_rgx,inf_rgx]):
'''
input:
sents: string of sentences
rgx: regex pattern of 0 cases
re_float: regex matching any numerical tokens
output: dictionary of valid measurements at encounter level, both defect and infiltrate
'''
ed_rgx = rgx[0]
inf_rgx = rgx[1]
ed_size=measure_max(sents, 'defect',re_float=re_float, rgx=ed_rgx).get('defect')
st_size=measure_max(sents, 'stain',re_float=re_float, rgx=ed_rgx).get('stain')
if ed_size==(None,None) and st_size!=(None,None):
ed_size=st_size
inf_size=measure_max(sents, 'infiltrat',re_float=re_float, rgx=inf_rgx).get('infiltrat')
uc_size=measure_max(sents, 'ulcer',re_float=re_float, rgx=inf_rgx).get('ulcer')
if inf_size==(None,None) and uc_size!=(None,None):
inf_size=uc_size
sents = sents.replace("'","")
if re.findall(r'defect[\s\w*]*\,? [\w*\s]*with[\s\w*]* infiltrate', sents)!=[] and ed_size==(None,None) and inf_size!=(None,None):
ed_size=inf_size
if re.findall(r'defect[\s\w*]*\,? [\w*\s]*with[\s\w*]* infiltrate', sents)!=[] and ed_size!=(None,None) and inf_size==(None,None):
inf_size=ed_size
if re.findall(r'infiltrate[\s\w*]*\,? [\w*\s]*with[\s\w*]* defect', sents)!=[] and ed_size!=(None,None) and inf_size==(None,None):
inf_size=ed_size
if re.findall(r'infiltrate[\s\w*]*\,? [\w*\s]*with[\s\w*]* defect', sents)!=[] and ed_size==(None,None) and inf_size!=(None,None):
ed_size=inf_size
return{'defect':ed_size, 'infiltrate': inf_size}
## example:
sent = '''3+ superficial punctate keratitis diffusely over graft; epithelial defect measuring 4.8 mm vertically x 2.1 mm horizontally with no stromal infiltrate underlying defect; small epithelial defect over central axis measuring x 0.9 mm vertical x 1.0 mm horizontal ; moderate diffuse stromal edema with descemet folds ; superficial stuck-on-appearing plaque-like opacities diffusely on graft with hazy borders, no significant corneal thinning'''
print(measure_size(sent))
print(measure_max(sent, 'infiltrate',inf_rgx,re_float=re_float))
print(measure_max(sent, 'defect',ed_rgx,re_float=re_float))
## on the whole dataframe
inp_path = 'df_corrected.csv'
out_path = 'measured.csv'
left_eye_col = 'corrected_left' ## output smart text column after spelling correction: left eyes
right_eye_col = 'corrected_right' ## output smart text column after spelling correction: right eyes
#dat=open(dat_inp,errors='ignore')
#dat=dat.read()
#test = StringIO(dat)
df = pd.read_csv(inp_path, sep=",",na_values=" ")
df['description'] = df[left_eye_col] + '. ' + df[right_eye_col]
df['description'] = pd.Series([re.sub(r'(\d+\:?\d*)-(\d+\:?\d*)', r'\1 - \2', x) for x in df.description.values.tolist()], index = df.index) # raw string so \1/\2 stay backreferences instead of control characters
measures = [measure_size(x) for x in df.description.values.tolist()]
df['ed_measure1'] = pd.Series([x['defect'][0] for x in measures])
df['ed_measure2'] = | pd.Series([x['defect'][1] for x in measures]) | pandas.Series |
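# Hedged continuation sketch (not in the original): the infiltrate columns mirror the defect
# columns above, and the write uses out_path, which is defined earlier but otherwise unused.
df['inf_measure1'] = pd.Series([x['infiltrate'][0] for x in measures])
df['inf_measure2'] = pd.Series([x['infiltrate'][1] for x in measures])
df.to_csv(out_path, index=False)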
import numpy as np
import pandas as pd
import scipy.sparse as sp
import sklearn.preprocessing as pp
from math import exp
from heapq import heappush, heappop
# conventional i2i
class CosineSimilarity():
# expects DataFrame, loaded from ratings.csv
def __init__(self, df, limit=20):
self.limit = limit
# no need for timestamp here
df = df.drop(labels = 'timestamp', axis = 1)
# let's see what's the mean rating for each movie,
df_mean = df.groupby(['movieId'], sort = False).mean().rename(columns = {'rating': 'mean'})
# join mean values to original DataFrame
df = df.join(df_mean, on = 'movieId', sort = False)
# and subtract mean values from each rating,
# so that rating of 0 becomes neutral
df['rating'] = df['rating'] - df['mean']
        # now pivot the original DataFrame into a user x movie feature/document matrix
# and fill all NaNs (where a user hasn't rated a movie) with zeros, which is legal now
df = df.pivot_table(index = 'userId', columns = 'movieId', values = 'rating').fillna(0)
        # movies whose ratings are all identical (e.g. all 4.0) become all-zero columns
        # after mean-centering; nothing can be recommended from them, hence removing
df = df.loc[:, (df != 0).any(axis = 0)]
# here we go, let's turn DataFrame into sparse matrix, normalize ratings,
cnm = pp.normalize(sp.csr_matrix(df.values), axis = 0)
# calculate recommendations and turn sparse matrix back into DataFrame,
# having movieId index, movieId columns and values, representing relevance of A to B
self.recs = pd.DataFrame((cnm.T * cnm).todense(), columns=df.columns, index=df.columns)
# retrieves "limit" of recommendations for given movie_id out of precalculated DataFrame
def recommend(self, movie_id):
if not movie_id in self.recs.index.values:
return | pd.DataFrame([], columns=['movieId']) | pandas.DataFrame |
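        # Hedged completion sketch (not in the original; the method body is truncated above):
        # take the precomputed similarity column for this movie, drop the movie itself, and
        # return the "limit" most similar movieIds with their cosine scores.
        top = self.recs[movie_id].drop(labels=movie_id).sort_values(ascending=False).head(self.limit)
        return top.rename('score').reset_index()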
# Jun. 28th, 2020
# Score: 0.14508
import torch
from torch import nn
from torch.nn import init
import torch.utils.data as Data
import pandas as pd
from modules import base
def get_net(feature_num):
hidden_num = 32, 8
drop_prob = .001
net = nn.Sequential(
nn.Linear(feature_num, hidden_num[0]),
nn.ReLU(),
nn.Dropout(drop_prob),
nn.Linear(hidden_num[0], hidden_num[1]),
nn.ReLU(),
nn.Linear(hidden_num[1], 1)
)
for params in net.parameters():
init.normal_(params, mean=0, std=.01)
return net
def log_rmse(net, features, labels, loss):
with torch.no_grad():
clipped_preds = torch.max(net(features), torch.tensor(1.0))
rmse = torch.sqrt(loss(clipped_preds.log(), labels.log()))
return rmse.item()
def train(net, train_features, train_labels, test_features, test_labels,
loss, learning_rate, weight_decay, batch_size):
train_ls, test_ls = [], []
dataset = Data.TensorDataset(train_features, train_labels)
train_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
optimizer = torch.optim.Adam(params=net.parameters(), lr=learning_rate, weight_decay=weight_decay)
net = net.float()
num_epochs = 0
while True:
for X, y in train_iter:
ls = loss(net(X.float()), y.float())
optimizer.zero_grad()
ls.backward()
optimizer.step()
train_ls.append(log_rmse(net, train_features, train_labels, loss))
if test_labels is not None:
test_ls.append(log_rmse(net, test_features, test_labels, loss))
num_epochs += 1
if len(train_ls) > 1:
if abs(train_ls[-1] - train_ls[-2]) < 1e-3:
break
return train_ls, test_ls, num_epochs
def get_k_fold_data(k, i, X, y):
assert k > 1
fold_size = X.shape[0] // k
X_train, y_train = None, None
X_valid, y_valid = None, None
for j in range(k):
idx = slice(j * fold_size, (j + 1) * fold_size)
X_part, y_part = X[idx, :], y[idx]
if j == i:
X_valid, y_valid = X_part, y_part
elif X_train is None:
X_train, y_train = X_part, y_part
else:
X_train = torch.cat((X_train, X_part), dim=0)
y_train = torch.cat((y_train, y_part), dim=0)
return X_train, y_train, X_valid, y_valid
def k_fold(k, X_train, y_train,
learning_rate, weight_decay, batch_size):
loss = nn.MSELoss()
train_loss_sum, valid_loss_sum = 0, 0
for i in range(k):
data = get_k_fold_data(k, i, X_train, y_train)
net = get_net(X_train.shape[1])
train_ls, valid_ls, num_epochs = train(net, *data, loss, learning_rate,
weight_decay, batch_size)
train_loss_sum += train_ls[-1]
valid_loss_sum += valid_ls[-1]
if i == k - 1:
base.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse',
range(1, num_epochs + 1), valid_ls,
['train', 'valid'])
print('fold %d, train rmse %f, valid rmse %f' % (i, train_ls[-1], valid_ls[-1]))
return train_loss_sum / k, valid_loss_sum / k
def train_and_pred(train_features, test_features, train_labels, test_data,
lr, weight_decay, batch_size):
loss = nn.MSELoss()
net = get_net(train_features.shape[1])
train_ls, _, num_epochs = train(net, train_features, train_labels, None, None,
loss, lr, weight_decay, batch_size)
base.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse')
print('train rmse %f' % train_ls[-1])
preds = net(test_features).detach().numpy()
test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
submission.to_csv('./submission.csv', index=False)
def main():
torch.set_default_tensor_type(torch.FloatTensor)
path = './data'
train_data = pd.read_csv(path + '/train.csv')
test_data = pd.read_csv(path + '/test.csv')
all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
# data preprocessing
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
lambda x: (x - x.mean()) / (x.std()))
all_features[numeric_features] = all_features[numeric_features].fillna(0)
all_features = | pd.get_dummies(all_features, dummy_na=True) | pandas.get_dummies |
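    # Hedged continuation sketch (not in the original; main() is truncated above): convert the
    # frames to tensors, run k-fold cross-validation, then fit on the full training set and
    # write submission.csv. The hyperparameter values below are illustrative assumptions.
    n_train = train_data.shape[0]
    train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float)
    test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float)
    train_labels = torch.tensor(train_data.SalePrice.values, dtype=torch.float).view(-1, 1)
    k, lr, weight_decay, batch_size = 5, 0.01, 0.05, 64
    train_l, valid_l = k_fold(k, train_features, train_labels, lr, weight_decay, batch_size)
    print('%d-fold validation: avg train rmse %f, avg valid rmse %f' % (k, train_l, valid_l))
    train_and_pred(train_features, test_features, train_labels, test_data, lr, weight_decay, batch_size)


if __name__ == '__main__':
    main()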
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
        data_temp = ((data['Close'] - data['Low']) -(data['High'] - data['Close']))\
        /(data['High'] - data['Low']) * data['Vol'] # parenthesised to match ((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW)*VOLUME
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        similar to alpha14, but returns the ratio close/delay(close,5) instead of the difference
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
        incomplete
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
    def alpha30(self):
        """
        incomplete in the original source: the body below never builds or returns an alpha
        (the published alpha30 appears to need the Fama-French factors, cf. the unused get_fama import)
        """
close = self.close
close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
        data.columns = ['open','close'] # concat order is [Open, close]; labels must match it so that close - open below is CLOSE - OPEN
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
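        # 0/1 flags choosing which true-range branch applies below: judge1*judge2 -> t2, judge3*judge4 -> t3, remaining rows -> t4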
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
        data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
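        # broadcast the single benchmark indicator column across all stock columns, then take 50-day rolling
        # counts: (stock up while benchmark not up) / (benchmark not up)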
        tep1 = np.tile(tep, (1, np.size(temp, 1)))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
        tep2_result = np.tile(tep_result, (1, np.size(temp, 1)))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
        close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
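        # temp compares the per-day close trend over days -20..-10 with the trend over days -10..0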
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
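        # where the 100-day mean has drifted less than 5% of the delayed close, keep the negated 3-day close change;
        # otherwise keep the negated gap above the 100-day low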
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = r['r1'] * r['r2'] * -1
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
        temp = pd.DataFrame((data['high'] - data['low'])/data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
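        # repeatedly smooth log(close) with the recursive SMA(13,2) filter, then take the day-over-day ratio of the smoothed series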
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
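        # money-flow style split: temp2 keeps typical-price * volume on days the typical price is >= its 1-day delay, temp3 on days it is <=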
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
        alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = | pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner') | pandas.concat |
import os
import pickle
from functools import reduce
from tqdm import tqdm
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from sklearn.metrics import roc_auc_score, plot_precision_recall_curve
import matplotlib.pyplot as plt
def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:
'''
    Use a decision tree to obtain the list of optimal binning boundary values.
'''
    boundary = []  # list of bin boundary values to be returned
    x = x.fillna(nan).values  # fill missing values
y = y.values
    clf = DecisionTreeClassifier(criterion='entropy',    # split using the entropy-minimization criterion
                                 max_leaf_nodes=6,       # maximum number of leaf nodes
                                 min_samples_leaf=0.05)  # minimum fraction of samples per leaf node
    clf.fit(x.reshape(-1, 1), y)  # fit the decision tree
n_nodes = clf.tree_.node_count
children_left = clf.tree_.children_left
children_right = clf.tree_.children_right
threshold = clf.tree_.threshold
for i in range(n_nodes):
        if children_left[i] != children_right[i]:  # collect the split thresholds of internal (non-leaf) nodes
boundary.append(threshold[i])
boundary.sort()
min_x = x.min()
    max_x = x.max() + 0.1  # +0.1 so that the later groupby step can include the sample with the maximum feature value
boundary = [min_x] + boundary + [max_x]
return boundary
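# Illustrative usage sketch (not part of the original code): assuming a DataFrame `df`
# with a numeric feature column 'x' and a binary 0/1 label column 'y' (both hypothetical
# names), the returned boundaries can be passed straight to pd.cut, e.g.
#     bins = optimal_binning_boundary(df['x'], df['y'])
#     df['x_bin'] = pd.cut(df['x'], bins=bins)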
def ks_table(y_predict, y_true, ascending=False, is_cate=False, sep=None, nbins=10):
"""
    Compute the KS (Kolmogorov-Smirnov) binning table.
    :param y_predict: list, array or pandas.Series, predicted probabilities or scores
    :param y_true: list, array or pandas.Series, true labels (1 or 0); only binary classification is supported
    :param ascending: boolean, default False, how y_predict is sorted.
        False sorts in descending order, i.e. a larger y_predict means a higher probability that y_true is 1 (bad customer), typically used for predicted probabilities;
        True sorts in ascending order, i.e. a smaller y_predict means a higher probability that y_true is 1 (bad customer), typically used for standard scores;
    :param sep: list, default None, preset split points
    :return: Pandas.DataFrame, the resulting KS table
"""
if len(y_predict) < 10:
return None
if not isinstance(y_predict, pd.Series):
y_predict = pd.Series(y_predict)
if not isinstance(y_true, pd.Series):
y_true = pd.Series(y_true)
y_predict = y_predict.reset_index(drop=True)
y_true = y_true.reset_index(drop=True)
data = | pd.concat([y_predict, y_true], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = | tm.box_expected(td1, box) | pandas.util.testing.box_expected |
"""
Original code based on Kaggle competition
Modified to take 3-channel input
"""
from __future__ import division
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Cropping2D
from keras import backend as K
import keras
import h5py
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Nadam
from keras.callbacks import History
import pandas as pd
from keras.backend import binary_crossentropy
import datetime
import os
import random
import threading
import tensorflow as tf
from keras.models import model_from_json
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
img_rows = 112
img_cols = 112
smooth = 1e-12
num_channels = 3
num_mask_channels = 1
random.seed(0)
def jaccard_coef(y_true, y_pred):
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_loss(y_true, y_pred):
return -K.log(jaccard_coef(y_true, y_pred)) + binary_crossentropy(y_pred, y_true)
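# Notes (added): `smooth` (1e-12) keeps the Jaccard ratio defined when a batch contains no
# positive pixels, and jaccard_coef_loss combines -log(jaccard) with pixel-wise binary
# cross-entropy, so the objective rewards region overlap while still penalising per-pixel errors.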
def get_unet0():
inputs = Input((num_channels, img_rows, img_cols))
skip1 = Convolution2D(12, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(inputs)
conv1 = Convolution2D(12, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(inputs)
conv1 = BatchNormalization(mode=0, axis=1)(conv1)
conv1 = keras.layers.advanced_activations.ELU()(conv1)
conv1 = Convolution2D(12, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv1)
conv1 = BatchNormalization(mode=0, axis=1)(conv1)
conv1 = keras.layers.Add()([conv1,skip1])
conv1 = keras.layers.advanced_activations.ELU()(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering='th')(conv1)
skip2 = Convolution2D(24, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(pool1)
conv2 = Convolution2D(24, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(pool1)
conv2 = BatchNormalization(mode=0, axis=1)(conv2)
conv2 = keras.layers.advanced_activations.ELU()(conv2)
conv2 = Convolution2D(24, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv2)
conv2 = BatchNormalization(mode=0, axis=1)(conv2)
conv2 = keras.layers.Add()([conv2,skip2])
conv2 = keras.layers.advanced_activations.ELU()(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering='th')(conv2)
skip3 = Convolution2D(48, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(pool2)
conv3 = Convolution2D(48, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(pool2)
conv3 = BatchNormalization(mode=0, axis=1)(conv3)
conv3 = keras.layers.advanced_activations.ELU()(conv3)
conv3 = Convolution2D(48, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv3)
conv3 = BatchNormalization(mode=0, axis=1)(conv3)
conv3 = keras.layers.Add()([conv3,skip3])
conv3 = keras.layers.advanced_activations.ELU()(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering='th')(conv3)
skip4 = Convolution2D(96, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(pool3)
conv4 = Convolution2D(96, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(pool3)
conv4 = BatchNormalization(mode=0, axis=1)(conv4)
conv4 = keras.layers.advanced_activations.ELU()(conv4)
conv4 = Convolution2D(96, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv4)
conv4 = BatchNormalization(mode=0, axis=1)(conv4)
conv4 = keras.layers.Add()([conv4,skip4])
conv4 = keras.layers.advanced_activations.ELU()(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering='th')(conv4)
skip5 = Convolution2D(192, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(pool4)
conv5 = Convolution2D(192, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(pool4)
conv5 = BatchNormalization(mode=0, axis=1)(conv5)
conv5 = keras.layers.advanced_activations.ELU()(conv5)
conv5 = Convolution2D(192, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv5)
conv5 = BatchNormalization(mode=0, axis=1)(conv5)
conv5 = keras.layers.Add()([conv5,skip5])
conv5 = keras.layers.advanced_activations.ELU()(conv5)
up6 = merge([UpSampling2D(size=(2, 2),dim_ordering='th')(conv5), conv4], mode='concat', concat_axis=1)
skip6 = Convolution2D(96, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(up6)
conv6 = Convolution2D(96, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(up6)
conv6 = BatchNormalization(mode=0, axis=1)(conv6)
conv6 = keras.layers.advanced_activations.ELU()(conv6)
conv6 = Convolution2D(96, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv6)
conv6 = BatchNormalization(mode=0, axis=1)(conv6)
conv6 = keras.layers.Add()([conv6,skip6])
conv6 = keras.layers.advanced_activations.ELU()(conv6)
up7 = merge([UpSampling2D(size=(2, 2),dim_ordering='th')(conv6), conv3], mode='concat', concat_axis=1)
skip7 = Convolution2D(48, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(up7)
conv7 = Convolution2D(48, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(up7)
conv7 = BatchNormalization(mode=0, axis=1)(conv7)
conv7 = keras.layers.advanced_activations.ELU()(conv7)
conv7 = Convolution2D(48, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv7)
conv7 = BatchNormalization(mode=0, axis=1)(conv7)
conv7 = keras.layers.Add()([conv7,skip7])
conv7 = keras.layers.advanced_activations.ELU()(conv7)
up8 = merge([UpSampling2D(size=(2, 2),dim_ordering='th')(conv7), conv2], mode='concat', concat_axis=1)
skip8 = Convolution2D(24, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(up8)
conv8 = Convolution2D(24, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(up8)
conv8 = BatchNormalization(mode=0, axis=1)(conv8)
conv8 = keras.layers.advanced_activations.ELU()(conv8)
conv8 = Convolution2D(24, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv8)
conv8 = BatchNormalization(mode=0, axis=1)(conv8)
conv8 = keras.layers.Add()([conv8,skip8])
conv8 = keras.layers.advanced_activations.ELU()(conv8)
up9 = merge([UpSampling2D(size=(2, 2),dim_ordering='th')(conv8), conv1], mode='concat', concat_axis=1)
skip9 = Convolution2D(12, 1, 1, border_mode='same', init='he_uniform',dim_ordering='th')(up9)
conv9 = Convolution2D(12, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(up9)
conv9 = BatchNormalization(mode=0, axis=1)(conv9)
conv9 = keras.layers.advanced_activations.ELU()(conv9)
conv9 = Convolution2D(12, 3, 3, border_mode='same', init='he_uniform',dim_ordering='th')(conv9)
conv9 = keras.layers.Add()([conv9,skip9])
crop9 = Cropping2D(cropping=((16, 16), (16, 16)),dim_ordering='th')(conv9)
conv9 = BatchNormalization(mode=0, axis=1)(crop9)
conv9 = keras.layers.advanced_activations.ELU()(conv9)
conv10 = Convolution2D(num_mask_channels, 1, 1, activation='sigmoid',dim_ordering='th')(conv9)
model = Model(input=inputs, output=conv10)
return model
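# Usage sketch (added, not part of the original script): the training entry point is not shown
# in this excerpt, but with the helpers defined in this module a typical Keras 1.x setup could
# look like the following; optimizer, learning rate and batch size are illustrative assumptions.
#
#   model = get_unet0()
#   model.compile(optimizer=Nadam(lr=1e-3),
#                 loss=jaccard_coef_loss,
#                 metrics=['binary_crossentropy', jaccard_coef_int])
#   history = History()
#   model.fit_generator(batch_generator(X_train, y_train, 128,
#                                       horizontal_flip=True, vertical_flip=True, swap_axis=True),
#                       samples_per_epoch=..., nb_epoch=..., callbacks=[history])
#   save_model(model, 'run0')
#   save_history(history, 'run0')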
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def form_batch(X, y, batch_size):
X_batch = np.zeros((batch_size, num_channels, img_rows, img_cols))
y_batch = np.zeros((batch_size, num_mask_channels, img_rows, img_cols))
X_height = X.shape[2]
X_width = X.shape[3]
for i in range(batch_size):
random_width = random.randint(0, X_width - img_cols - 1)
random_height = random.randint(0, X_height - img_rows - 1)
random_image = random.randint(0, X.shape[0] - 1)
y_batch[i] = y[random_image, :, random_height: random_height + img_rows, random_width: random_width + img_cols]
X_batch[i] = np.array(X[random_image, :, random_height: random_height + img_rows, random_width: random_width + img_cols])
return X_batch, y_batch
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def next(self):
with self.lock:
return self.it.next()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
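# Usage note (added): to make `batch_generator` safe for multi-worker Keras training it would be
# decorated with `@threadsafe_generator`; the original file defines the decorator but does not
# appear to apply it to the generator below.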
def batch_generator(X, y, batch_size, horizontal_flip=False, vertical_flip=False, swap_axis=False):
while True:
X_batch, y_batch = form_batch(X, y, batch_size)
for i in range(X_batch.shape[0]):
xb = X_batch[i]
yb = y_batch[i]
if horizontal_flip:
if np.random.random() < 0.5:
xb = flip_axis(xb, 1)
yb = flip_axis(yb, 1)
if vertical_flip:
if np.random.random() < 0.5:
xb = flip_axis(xb, 2)
yb = flip_axis(yb, 2)
if swap_axis:
if np.random.random() < 0.5:
xb = xb.swapaxes(1, 2)
yb = yb.swapaxes(1, 2)
X_batch[i] = xb
y_batch[i] = yb
yield X_batch, y_batch[:, :, 16:16 + img_rows - 32, 16:16 + img_cols - 32]
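# Note (added): the mask is cropped by 16 px on each side before being yielded so that it matches
# the Cropping2D(((16, 16), (16, 16))) layer at the end of get_unet0, i.e. the network sees
# 112x112 inputs but is supervised only on the central 80x80 region.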
def save_model(model, cross):
json_string = model.to_json()
if not os.path.isdir('cache'):
os.mkdir('cache')
json_name = 'architecture_resnet' + cross + '.json'
weight_name = 'model_weights_resnet' + cross + '.h5'
open(os.path.join('cache', json_name), 'w').write(json_string)
model.save_weights(os.path.join('cache', weight_name), overwrite=True)
def save_history(history, suffix):
filename = 'history/history_' + suffix + '.csv'
| pd.DataFrame(history.history) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import os
from glob import glob
import sys
import gc
from scipy.optimize import curve_fit
from astropy.table import Table
import astropy.io.fits as fits
from astropy.timeseries import LombScargle, BoxLeastSquares
import exoplanet as xo
# import pymc3 as pm
# import theano.tensor as tt
from stuff import FINDflare, EasyE, IRLSSpline
matplotlib.rcParams.update({'font.size':18})
matplotlib.rcParams.update({'font.family':'serif'})
ftype = '.pdf'
# tess_dir = '/data/epyc/data/tess/'
# tess_dir = '/Users/james/Desktop/tess/'
#
# sectors = ['sector001', 'sector002', 'sector003', 'sector004', 'sector005', 'sector006']
#
# # just in case glob wants to re-order things, be sure grab them in Sector order
# sect1 = glob(tess_dir + sectors[0] + '/*.fits', recursive=True)
# sect2 = glob(tess_dir + sectors[1] + '/*.fits', recursive=True)
# sect3 = glob(tess_dir + sectors[2] + '/*.fits', recursive=True)
# sect4 = glob(tess_dir + sectors[3] + '/*.fits', recursive=True)
# sect5 = glob(tess_dir + sectors[4] + '/*.fits', recursive=True)
# sect6 = glob(tess_dir + sectors[5] + '/*.fits', recursive=True)
#
# files = sect1 + sect2 + sect3 + sect4 + sect5 + sect6
# # make into an array for looping later!
# s_lens = [len(sect1), len(sect2), len(sect3), len(sect4), len(sect5), len(sect6)]
# print(s_lens, len(files))
def BasicActivity(sector, tess_dir = '/Users/james/Desktop/tess/',
run_dir = '/Users/james/Desktop/helloTESS/',
clobber=False):
'''
Run the basic set of tools on every light curve
Produce a diagnostic plot for each light curve
Save a file on Rotation stats and a file on Flare stats
'''
print('running ' + tess_dir + sector)
files_i = glob(tess_dir + sector + '/*.fits', recursive=True)
print(str(len(files_i)) + ' .fits files found')
# arrays to hold outputs
per_out = np.zeros(len(files_i)) -1
per_amp = np.zeros(len(files_i)) -1
per_med = np.zeros(len(files_i)) -1
per_std = np.zeros(len(files_i)) -1
ACF_1pk = np.zeros(len(files_i)) -1
ACF_1dt = np.zeros(len(files_i)) -1
blsPeriod = np.zeros(len(files_i)) -1
blsAmpl = np.zeros(len(files_i)) -1
EclNum = np.zeros(len(files_i)) -1
EclDep = np.zeros(len(files_i)) -1
FL_id = np.array([])
FL_t0 = np.array([])
FL_t1 = np.array([])
FL_f0 = np.array([])
FL_f1 = np.array([])
if not os.path.isdir(run_dir + 'figures/' + sector):
os.makedirs(run_dir + 'figures/' + sector)
plt.ioff()
for k in range(len(files_i)):
# print(files_i[k])
if k % 1000 == 0:
print(str(k) + '/'+str(len(files_i)))
tbl = -1
df_tbl = -1
try:
tbl = Table.read(files_i[k], format='fits')
df_tbl = tbl.to_pandas()
except (OSError, KeyError, TypeError, ValueError):
print('k=' + str(k) + ' bad file: ' + files_i[k])
# this is a bit clumsy, but it made sense at the time when trying to chase down some bugs...
if tbl != -1:
# make harsh quality cuts, and chop out a known bad window of time (might add more later)
AOK = (tbl['QUALITY'] == 0) & ((tbl['TIME'] < 1347) | (tbl['TIME'] > 1350))
med = np.nanmedian(df_tbl['PDCSAP_FLUX'][AOK])
# ACF w/ Exoplanet package
acf = xo.autocorr_estimator(tbl['TIME'][AOK], tbl['PDCSAP_FLUX'][AOK] / med,
yerr=tbl['PDCSAP_FLUX_ERR'][AOK] / med,
min_period=0.07, max_period=27, max_peaks=2)
if len(acf['peaks']) > 0:
ACF_1dt[k] = acf['peaks'][0]['period']
ACF_1pk[k] = acf['autocorr'][1][np.where((acf['autocorr'][0] == acf['peaks'][0]['period']))[0]][0]
s_window = int(ACF_1dt[k] / np.abs(np.nanmedian(np.diff(tbl['TIME']))) / 6.)
else:
s_window = 128
# do a running median for a basic smooth
# smo = (df_tbl['PDCSAP_FLUX'][AOK].rolling(128, center=True).median() + df_tbl['PDCSAP_FLUX'][AOK].rolling(256, center=True).median()) / 2.
smo = df_tbl['PDCSAP_FLUX'][AOK].rolling(s_window, center=True).median()
# make an output plot for every file
figname = run_dir + 'figures/' + sector + '/' + files_i[k].split('/')[-1] + '.jpeg' #run_dir + 'figures/longerP/' + TICs[0].split('-')[2] + '.jpeg'
makefig = ((not os.path.exists(figname)) | clobber)
if makefig:
plt.figure(figsize=(12,9))
plt.errorbar(tbl['TIME'][AOK], tbl['PDCSAP_FLUX'][AOK]/med, yerr=tbl['PDCSAP_FLUX_ERR'][AOK]/med,
linestyle=None, alpha=0.15, label='PDC_FLUX')
plt.plot(tbl['TIME'][AOK], smo/med, label=str(s_window)+'pt MED')
if (ACF_1dt[k] > 0):
plt.plot(tbl['TIME'][AOK],
np.nanstd(smo / med) * ACF_1pk[k] * np.sin(tbl['TIME'][AOK] / ACF_1dt[k] * 2 * np.pi) + 1,
label='ACF=' + format(ACF_1dt[k], '6.3f') + 'd, pk=' + format(ACF_1pk[k], '6.3f'), lw=2,
alpha=0.7)
# plt.errorbar(tbl['TIME'][AOK], tbl['SAP_FLUX'][AOK]/Smed, yerr=tbl['SAP_FLUX_ERR'][AOK]/Smed,
# linestyle=None, alpha=0.25, label='SAP_FLUX')
# require at least 1000 good datapoints for analysis
if sum(AOK) > 1000:
# find OK points in the smoothed LC
SOK = np.isfinite(smo)
# do some SPLINE'ing
# spl = IRLSSpline(df_tbl['TIME'].values[AOK][SOK], df_tbl['PDCSAP_FLUX'].values[AOK][SOK] / med,
# df_tbl['PDCSAP_FLUX_ERR'].values[AOK][SOK] / med)
# flares
FL = FINDflare((df_tbl['PDCSAP_FLUX'][AOK][SOK] - smo[SOK])/med,
df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med,
N1=4, N2=2, N3=5, avg_std=False)
if np.size(FL) > 0:
for j in range(len(FL[0])):
FL_id = np.append(FL_id, k)
FL_t0 = np.append(FL_t0, FL[0][j])
FL_t1 = np.append(FL_t1, FL[1][j])
FL_f0 = np.append(FL_f0, med)
FL_f1 = np.append(FL_f1, np.nanmax(tbl['PDCSAP_FLUX'][AOK][SOK][(FL[0][j]):(FL[1][j]+1)]))
if makefig:
if np.size(FL) > 0:
for j in range(len(FL[0])):
plt.scatter(tbl['TIME'][AOK][SOK][(FL[0][j]):(FL[1][j]+1)],
tbl['PDCSAP_FLUX'][AOK][SOK][(FL[0][j]):(FL[1][j]+1)] / med, color='r',
label='_nolegend_')
plt.scatter([],[], color='r', label='Flare?')
# Lomb Scargle
LS = LombScargle(df_tbl['TIME'][AOK], df_tbl['PDCSAP_FLUX'][AOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK]/med)
frequency, power = LS.autopower(minimum_frequency=1./40.,
maximum_frequency=1./0.1,
samples_per_peak=7)
best_frequency = frequency[np.argmax(power)]
per_out[k] = 1./best_frequency
per_amp[k] = np.nanmax(power)
per_med[k] = np.nanmedian(power)
per_std[k] = np.nanstd(smo[SOK]/med)
if np.nanmax(power) > 0.05:
LSmodel = LS.model(df_tbl['TIME'][AOK], best_frequency)
if makefig:
plt.plot(df_tbl['TIME'][AOK], LSmodel,
label='L-S P='+format(1./best_frequency, '6.3f')+'d, pk='+format(np.nanmax(power), '6.3f'))
# here is where a simple Eclipse (EB) finder goes
# EE = EasyE(smo[SOK]/med, df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med,
# N1=5, N2=3, N3=2)
EE = EasyE(df_tbl['PDCSAP_FLUX'][AOK][SOK]/med - smo[SOK]/med,
df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK] / med, N1=5, N2=2.5, N3=2.5)
# N1 datapoints long, and
# N2 times below the stddev, and
# N3 times below the error
if (np.size(EE) > 0):
# need to test if EE outputs look periodic-ish, or just junk...
noE = np.arange(len(SOK))
for j in range(len(EE[0])):
if makefig:
plt.scatter(tbl['TIME'][AOK][SOK][(EE[0][j]):(EE[1][j]+1)],
df_tbl['PDCSAP_FLUX'][AOK][SOK][(EE[0][j]):(EE[1][j]+1)] / med,
color='k', marker='s', s=5, alpha=0.75, label='_nolegend_')
noE[(EE[0][j]):(EE[1][j]+1)] = -1
EclDep[k] = EclDep[k] + np.nanmin(df_tbl['PDCSAP_FLUX'][AOK][SOK][(EE[0][j]):(EE[1][j] + 1)] / med - smo[SOK][(EE[0][j]):(EE[1][j] + 1)]/med)
if makefig:
plt.scatter([],[], color='k', marker='s', s=5, alpha=0.75, label='Ecl: '+str(len(EE[0])))
EclNum[k] = len(EE[0])
EclDep[k] = EclDep[k] / np.float(len(EE[0]))
okE = np.where((noE > -1))[0]
else:
okE = np.arange(len(SOK))
# do some GP'ing, from:
# https://exoplanet.dfm.io/en/stable/tutorials/stellar-variability/
# if False:
# with pm.Model() as model:
#
# # The mean flux of the time series
# mean = pm.Normal("mean", mu=1.0, sd=10.0)
#
# # A jitter term describing excess white noise
# # print(AOK.shape, SOK.shape, okE.shape)
# yerr = df_tbl['PDCSAP_FLUX_ERR'].values[AOK][SOK] / med
# y = df_tbl['PDCSAP_FLUX'].values[AOK][SOK] / med
# x = df_tbl['TIME'].values[AOK][SOK]
#
# logs2 = pm.Normal("logs2", mu=2 * np.log(np.min(yerr)), sd=5.0)
#
# # The parameters of the RotationTerm kernel
# logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
# logperiod = pm.Normal("logperiod", mu=np.log(acf['peaks'][0]['period']), sd=5.0)
# logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
# logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
# mix = pm.Uniform("mix", lower=0, upper=1.0)
#
# # Track the period as a deterministic
# period = pm.Deterministic("period", tt.exp(logperiod))
#
# # Set up the Gaussian Process model
# kernel = xo.gp.terms.RotationTerm(
# log_amp=logamp,
# period=period,
# log_Q0=logQ0,
# log_deltaQ=logdeltaQ,
# mix=mix
# )
# gp = xo.gp.GP(kernel, x, yerr ** 2 + tt.exp(logs2), J=4)
#
# # Compute the Gaussian Process likelihood and add it into the
# # the PyMC3 model as a "potential"
# pm.Potential("loglike", gp.log_likelihood(y - mean))
#
# # Compute the mean model prediction for plotting purposes
# pm.Deterministic("pred", gp.predict())
#
# # Optimize to find the maximum a posteriori parameters
# map_soln = xo.optimize(start=model.test_point)
#
# gpspl = map_soln["pred"]
# plt.plot(df_tbl['TIME'].values[AOK][SOK], gpspl+1, label='GP')
# add BLS
bls = BoxLeastSquares(df_tbl['TIME'][AOK][SOK], smo[SOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med)
blsP = bls.autopower([0.05], method='fast', objective='snr',
minimum_n_transit=3, minimum_period=0.1, maximum_period=15,
frequency_factor=1.5)
blsPer = blsP['period'][np.argmax(blsP['power'])]
if ((np.nanmax(blsP['power']) > 2.5*np.nanstd(blsP['power']) + np.nanmedian(blsP['power']) ) &
# (np.nanmax(blsP['power']) > 10.) &
(blsPer < 0.95 * np.nanmax(blsP['period']))
):
blsPeriod[k] = blsPer
blsAmpl[k] = np.nanmax(blsP['power'])
if makefig:
plt.plot([],[], ' ', label='BLS='+format(blsPer, '6.3f')+'d, snr='+format(np.nanmax(blsP['power']), '6.3f'))
if makefig:
# plt.plot(df_tbl['TIME'].values[AOK][SOK], spl, label='spl')
plt.title(files_i[k].split('/')[-1] + ' k='+str(k), fontsize=12)
plt.ylabel('Flux')
plt.xlabel('BJD - 2457000 (days)')
plt.legend(fontsize=10)
# plt.show()
plt.savefig(figname, bbox_inches='tight', pad_inches=0.25, dpi=100)
plt.close()
# reset the data again, not needed, but juuuuuust in case
del tbl
del df_tbl
del AOK
gc.collect()
# write per-sector output files
ALL_TIC = | pd.Series(files_i) | pandas.Series |
# -*- coding: utf-8 -*-
# author: <NAME>
# Email: <EMAIL>
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import generators
from __future__ import with_statement
import re
from bs4 import BeautifulSoup
from concurrent import futures
import os
import sys
import traceback
import time
import datetime
import pandas as pd
import requests
import json
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from fake_useragent import UserAgent
from openpyxl import load_workbook
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.header import Header
############ Global variable initialization ##############
HEADERS = dict()
# Number of concurrent threads
NUM_THREADS = None
# City selection (keys are Chinese city names)
city_dict = {
"成都": "cd",
"北京": "bj",
"上海": "sh",
"广州": "gz",
"深圳": "sz",
"南京": "nj",
"合肥": "hf",
"杭州": "hz",
}
# Whether to print HTTP errors
PRINT = True if ((len(sys.argv) > 1) and (sys.argv[1] == 'true')) else False
# Initialize the fake User-Agent generator
ua = UserAgent()
# Do not use a proxy
proxies = None
WORKPATH="/home/frank/workspace/lianjia/data"
CITY = city_dict["北京"]
""" HTTP GET 操作封装 """
def get_bs_obj_from_url(http_url):
done = False
exception_time = 0
HEADERS["User-Agent"] = ua.random
while not done:
try:
if PRINT:
print("正在获取 {}".format(http_url))
r = requests.get(http_url, headers=HEADERS, proxies=proxies, timeout=3)
bs_obj = BeautifulSoup(r.text, "lxml")
done = True
except Exception as e:
if PRINT:
print(e)
exception_time += 1
time.sleep(1)
if exception_time > 10:
return None
return bs_obj
""" 判断一个字符串是否可以转成数字 """
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def esf_mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
print("{} create successfully.".format(path))
return True
else:
print("{} already exist.".format(path))
return False
def get_district_from_city(city):
print("---get {} districts---".format(city))
city_url = "http://{}.lianjia.com".format(city)
http_url = city_url + "/ershoufang"
bs_obj = get_bs_obj_from_url(http_url)
parent_div = bs_obj.find("div", {"data-role": "ershoufang"})
a_list = parent_div.find_all("a")
district_list = [a.attrs["href"].replace("/ershoufang/", "")[:-1]
for a in a_list
if a.attrs['href'].startswith("/ershoufang")]
print("---total {} districts---".format(len(district_list)))
return district_list
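# Example (added, illustrative values): for city 'bj' this returns the pinyin district slugs
# scraped from the ershoufang landing page, e.g. something like
# ['dongcheng', 'xicheng', 'chaoyang', ...]; the exact list depends on the live site.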
def get_district_name_from_city(city):
print("---get {} districts---".format(city))
city_url = "http://{}.lianjia.com".format(city)
http_url = city_url + "/ershoufang"
bs_obj = get_bs_obj_from_url(http_url)
parent_div = bs_obj.find("div", {"data-role": "ershoufang"})
a_list = parent_div.find_all("a")
name_list = [a.get_text() for a in a_list
if a.attrs['href'].startswith("/ershoufang")]
print("---total {} districts---".format(len(name_list)))
return name_list
def get_esf_from_district(city, district):
http_url = "http://{}.lianjia.com/ershoufang/{}".format(city, district)
bs_obj = get_bs_obj_from_url(http_url)
esf_list = []
try:
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
#try again
try:
bs_obj = get_bs_obj_from_url(http_url)
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
return esf_list
print("---district {} total ershoufang numbers: {}---".format(district, total_esf_num))
if total_esf_num == 0:
print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
return esf_list
for price in range(1, 9):
esf_list_partial = get_esf_id_in_price(city, district, price)
if esf_list_partial is not None and len(esf_list_partial) > 0:
esf_list += esf_list_partial
print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
return esf_list
def get_esf_id_in_price(city, district, price):
http_url = "http://{}.lianjia.com/ershoufang/{}/p{}".format(city, district, price)
bs_obj = get_bs_obj_from_url(http_url)
total_esf_num = 0
try:
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
print(" price {} get error.".format(price))
pass
#print("------price {} total : {}---".format(price, total_esf_num))
esf_list = []
if total_esf_num == 0:
print(" price {} finish---done.".format(price))
return esf_list
try:
page_box = bs_obj.find("div", {"class": "page-box house-lst-page-box"})
total_pages = int(json.loads(page_box.attrs["page-data"])["totalPage"])
except Exception as e:
print(" price {} page get error.".format(price))
return esf_list
with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
future_list = []
for page_no in range(1, total_pages + 1):
future_list.append(executor.submit(get_esf_id_in_page, city, district, price, page_no))
fail_list = []
count = 0
for future in futures.as_completed(future_list):
page_no, esf_list_partial = future.result()
if esf_list_partial is None or len(esf_list_partial) == 0:
fail_list.append(page_no)
else:
esf_list += esf_list_partial
count += 1
sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
for page_no in fail_list:
_, esf_list_partial = get_esf_id_in_page(city, district, price, page_no)
if esf_list_partial is not None and len(esf_list_partial) > 0:
esf_list += esf_list_partial
count += 1
sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
print("---done.")
return esf_list
def get_esf_id_in_page(city, district, price, page_no):
http_url = "http://{}.lianjia.com/ershoufang/{}/pg{}p{}".format(city, district, page_no, price)
bs_obj = get_bs_obj_from_url(http_url)
if bs_obj is None:
print("get ershoufang id, price {} page {} is none".format(price, page_no))
return None
parent_list = bs_obj.find_all("li", {"class": "clear"})
esf_list = []
if not (len(parent_list) == 0):
for li in parent_list:
esf_url = str(li.find("div", {"class": "title"}).find("a").attrs["href"])
esf_id = "".join(list(filter(str.isdigit, esf_url)))
esf_list.append(esf_id)
return page_no, esf_list
def get_esf_of_city(city):
district_list = get_district_from_city(city)
esf_list = []
for district in district_list:
esf_of_district = get_esf_from_district(city, district)
esf_list += esf_of_district
esf_list = sorted(set(esf_list), key=esf_list.index)
return esf_list
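# Usage sketch (added, the driver code is not shown in this excerpt): the crawl is presumably
# wired together roughly as
#   esf_list = get_esf_of_city(CITY)
#   df = get_esf_info_from_esf_list(CITY, esf_list)
#   df.to_excel('{}.xlsx'.format(datetime.date.today()), sheet_name='total', index_label='ID')
# i.e. collect listing ids city-wide, fetch per-listing details, then persist to Excel for the
# later comparison/statistics steps. The file name and sheet layout here are assumptions.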
def get_esf_info(city, esf_id):
http_url = "https://{}.lianjia.com/ershoufang/{}.html".format(city, esf_id)
bs_obj = get_bs_obj_from_url(http_url)
df = pd.DataFrame()
if bs_obj is not None:
try:
test = bs_obj.find("div", {"class": "icon-404 icon fl"})
if test is not None:
return esf_id, df
total_price = bs_obj.find("span", {"class": "total"}).get_text()
if not is_number(total_price):
return esf_id, df
unit_price = bs_obj.find("div", {"class": "unitPrice"}).get_text().replace("元/平米", "")
huxing = bs_obj.find("div", {"class": "room"}).find("div", {"class": "mainInfo"}).get_text()
xiaoqu = bs_obj.find("div", {"class": "communityName"}).find("a").get_text()
area_info = bs_obj.find("div", {"class": "areaName"}).find_all("a")
chengqu = area_info[0].get_text()
quyu = area_info[1].get_text()
base_info = bs_obj.find("div", {"class": "newwrap baseinform"})
# Basic attributes
base = base_info.find("div", {"class": "base"}).get_text()
louceng = None if "所在楼层" not in base else base.split("所在楼层")[1].split("(")[0]
zonglouceng = None if "所在楼层" not in base else base.split("(共")[1].split("层")[0]
jianzhumianji = None if "建筑面积" not in base else base.split("建筑面积")[1].split("㎡")[0]
if not is_number(jianzhumianji):
return esf_id, df
huxingjiegou = None if "户型结构" not in base else base.split("户型结构")[1].split("\n")[0]
if "套内面积" not in base:
taoneimianji = None
elif "暂无数据" in base.split("套内面积")[1].split("\n")[0]:
taoneimianji = None
else:
taoneimianji = base.split("套内面积")[1].split("㎡")[0]
jianzhuleixing = None if "建筑类型" not in base else base.split("建筑类型")[1].split("\n")[0]
chaoxiang = None if "房屋朝向" not in base else base.split("房屋朝向")[1].split("\n")[0]
jianzhujiegou = None if "建筑结构" not in base else base.split("建筑结构")[1].split("\n")[0]
zhuangxiu = None if "装修情况" not in base else base.split("装修情况")[1].split("\n")[0]
tihubili = None if "梯户比例" not in base else base.split("梯户比例")[1].split("\n")[0]
gongnuan = None if "供暖方式" not in base else base.split("供暖方式")[1].split("\n")[0]
dianti = None if "配备电梯" not in base else base.split("配备电梯")[1].split("\n")[0]
chanquan = None if "产权年限" not in base else base.split("产权年限")[1].split("\n")[0]
yongshui = "商水" if base_info.find(text="商水") is not None else "民水"
yongdian = "商电" if base_info.find(text="商电") is not None else "民电"
# Transaction attributes
trans = base_info.find("div", {"class": "transaction"}).get_text()
guapaishijian = None if "挂牌时间" not in trans else trans.split("挂牌时间")[1].strip().split("\n")[0]
jiaoyiquanshu = None if "交易权属" not in trans else trans.split("交易权属")[1].strip().split("\n")[0]
fangwuyongtu = None if "房屋用途" not in trans else trans.split("房屋用途")[1].strip().split("\n")[0]
fangwunianxian = None if "房屋年限" not in trans else trans.split("房屋年限")[1].strip().split("\n")[0]
chanquansuoshu = None if "产权所属" not in trans else trans.split("产权所属")[1].strip().split("\n")[0]
diyaxinxi = None if "抵押信息" not in trans else trans.split("抵押信息")[1].strip().split("\n")[0]
df = pd.DataFrame(index=[esf_id], data=[[http_url, chengqu, quyu, xiaoqu,
huxing, total_price, unit_price, jianzhumianji,
taoneimianji, chaoxiang, louceng, zonglouceng,
huxingjiegou, jianzhuleixing, jianzhujiegou,
fangwuyongtu, jiaoyiquanshu, fangwunianxian,
guapaishijian, zhuangxiu, tihubili, gongnuan,
dianti, chanquan, yongshui, yongdian,
chanquansuoshu, diyaxinxi]],
columns=["URL", "城区", "片区", "小区",
"户型", "总价", "单价", "建筑面积",
"套内面积", "朝向", "楼层", "总楼层",
"户型结构", "建筑类型", "建筑结构",
"房屋用途", "交易权属", "房屋年限",
"挂牌时间", "装修", "梯户比例", "供暖",
"配备电梯", "产权", "用水", "用电",
"产权所属", "抵押信息"])
except Exception as e:
print("[E]: get_esf_info, esf_id =", esf_id, e)
traceback.print_exc()
pass
return esf_id, df
def get_esf_info_from_esf_list(city, esf_list):
df_esf_info = pd.DataFrame()
count = 0
pct = 0
with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
future_list = []
for esf in esf_list:
future_list.append(executor.submit(get_esf_info, city, esf))
fail_list = []
#print(" ")
for future in futures.as_completed(future_list):
esf, df_info_partial = future.result()
if len(df_info_partial) == 0:
fail_list.append(esf)
else:
df_esf_info = df_esf_info.append(df_info_partial)
count += 1
sys.stdout.write("\rget ershoufang info: {}/{}".format(count, len(esf_list)))
for esf in fail_list:
_, df_info_partial = get_esf_info(city, esf)
if len(df_info_partial) > 0:
df_esf_info = df_esf_info.append(df_info_partial)
count += 1
sys.stdout.write("\rget ershoufang info: {}/{}".format(count, len(esf_list)))
print(" ")
return df_esf_info
def compare_two_list(new_esf_list, old_esf_list):
add_list = []
remove_list = []
same_list = []
for esf_id in new_esf_list:
if esf_id not in old_esf_list:
add_list.append(esf_id)
else:
same_list.append(esf_id)
for esf_id in old_esf_list:
if esf_id not in new_esf_list:
remove_list.append(esf_id)
return add_list, remove_list, same_list
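# Note (added): this membership scan is O(n*m); set arithmetic (set(new) - set(old),
# set(old) - set(new), set(new) & set(old)) would be faster, at the cost of not preserving
# the original ordering of the id lists.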
def excel_add_sheet(dataframe, filename, sheetname, indexname):
excelwriter = pd.ExcelWriter(filename)
book = load_workbook(excelwriter.path)
excelwriter.book = book
dataframe.to_excel(excelwriter, sheetname, index_label=indexname)
excelwriter.close()
return
def get_price_changed_esf_info(same_list, new_esf_info, old_esf_info):
df_jiang = pd.DataFrame()
df_zhang = pd.DataFrame()
count = 0
for esf_id in same_list:
try:
new_price = new_esf_info.loc[[esf_id]]["总价"].values[0]
old_price = old_esf_info.loc[[esf_id]]["总价"].values[0]
old_unit_price = old_esf_info.loc[esf_id]["单价"]
new_info = new_esf_info.loc[[esf_id]]
if new_price > old_price:
new_info.insert(loc=6, column="原总价", value=old_price)
new_info.insert(loc=7, column="涨价", value=(new_price-old_price))
zhangfu=format(((new_price-old_price)/old_price), '.2%')
new_info.insert(loc=8, column="涨幅", value=zhangfu)
new_info.insert(loc=10, column="原单价", value=old_unit_price)
df_zhang = df_zhang.append(new_info)
elif new_price < old_price:
new_info.insert(loc=6, column="原总价", value=old_price)
new_info.insert(loc=7, column="降价", value=(old_price-new_price))
diefu=format(((old_price-new_price)/old_price), '.2%')
new_info.insert(loc=8, column="降幅", value=diefu)
new_info.insert(loc=10, column="原单价", value=old_unit_price)
df_jiang = df_jiang.append(new_info)
else:
pass
except Exception as e:
print("[E]: get_price_changed, esf_id =", esf_id, e)
pass
count += 1
sys.stdout.write("\rget price change info: {}/{}".format(count, len(same_list)))
print(" ")
return df_jiang, df_zhang
def get_chengjiao_yesterday(city):
district_list = get_district_from_city(city)
chengjiao = 0
for district in district_list:
http_url = 'https://{}.lianjia.com/fangjia/{}'.format(city, district)
bs_obj = get_bs_obj_from_url(http_url)
if bs_obj is None:
chengjiao += 0
continue
item = bs_obj.find("div", {"class": "item item-1-2"})
if item is None:
chengjiao += 0
continue
num = item.find("div", {"class": "num"}).find("span").get_text()
chengjiao += (0 if "暂无数据" in num else int(num))
return chengjiao
def get_lianjia_fangjia_info(city):
try:
http_url = 'https://{}.lianjia.com/fangjia'.format(city)
bs_obj = get_bs_obj_from_url(http_url)
tongji = bs_obj.find("div", {"class": "box-l-b"})
lj_all = tongji.find_all("div", {"class": "num"})
lj_new = lj_all[0].get_text()
lj_ren = lj_all[1].get_text()
lj_kan = lj_all[2].get_text()
except Exception as e:
lj_new, lj_ren, lj_kan = get_lianjia_fangjia_info(city)
return lj_new, lj_ren, lj_kan
def get_tongji_info(city, filename):
lj_new, lj_ren, lj_kan = get_lianjia_fangjia_info(city)
chengjiao = get_chengjiao_yesterday(city)
new_str = datetime.date.today().strftime('%Y-%m-%d')
total_info = pd.read_excel(filename, sheet_name="total", index_col=0)
total_list = total_info.index.values
new_info = pd.read_excel(filename, sheet_name="新上", index_col=0)
new_list = new_info.index.values
rm_info = pd.read_excel(filename, sheet_name="下架", index_col=0)
rm_list = rm_info.index.values
jiang_info = pd.read_excel(filename, sheet_name="降价", index_col=0)
jiang_list = jiang_info.index.values
zhang_info = pd.read_excel(filename, sheet_name="涨价", index_col=0)
zhang_list = zhang_info.index.values
junjia = format(sum(total_info['总价']) * 10000 / sum(total_info['建筑面积']), '.2f')
jiangfu = (jiang_info['降幅'].str.strip("%").astype(float)/100) if len(jiang_list) else 0
junjiang = (format(sum(jiangfu) / len(jiangfu), '.2%')) if len(jiang_list) else 0
zhangfu = (zhang_info['涨幅'].str.strip("%").astype(float)/100) if len(zhang_list) else 0
junzhang = (format(sum(zhangfu) / len(zhangfu), '.2%')) if len(zhang_list) else 0
data=[[len(total_list), junjia, chengjiao, len(new_list), len(rm_list),
len(jiang_list), junjiang, len(zhang_list), junzhang, lj_new,
lj_ren, lj_kan]]
columns=['总数', '均价', '成交', '上架', '下架', '降价', '降幅', '涨价',
'涨幅', '新上', '新客户', '带看']
name_list = get_district_name_from_city(city)
for name in name_list:
chengqu = total_info[total_info['城区']==name]
avg_price = format(sum(chengqu['总价']) * 10000 /
sum(chengqu['建筑面积']), '.2f') if len(chengqu) else 0
data[0].append(avg_price)
columns.append(name)
info = pd.DataFrame(index=[new_str], data=data, columns=columns)
return info
def get_email_content(info):
content = '本期统计信息:\n'
content += '线上总套数:{}套,'.format(info['总数'].values[0])
content += '均价:{}元/平米\n'.format(info['均价'].values[0])
content += '昨日成交数:{}套\n'.format(info['成交'].values[0])
content += '新上房源数:{}套\n'.format(info['上架'].values[0])
content += '下架房源数:{}套\n'.format(info['下架'].values[0])
content += '降价房源数:{}套,'.format(info['降价'].values[0])
content += '均降:{}\n'.format(info['降幅'].values[0])
content += '涨价房源数:{}套,'.format(info['涨价'].values[0])
content += '均涨:{}\n'.format(info['涨幅'].values[0])
content += '\n'
content += '链家统计信息:\n'
content += '新增房源数:{}套\n'.format(info['新上'].values[0])
content += '新增客户数:{}人\n'.format(info['新客户'].values[0])
content += '新增带看数:{}次\n'.format(info['带看'].values[0])
return content
def addimg(src, imgid):
fp = open(src, 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
msgImage.add_header('Content-ID', imgid)
return msgImage
def send_email(content, filename):
sender = '<EMAIL>'
receivers = ['<EMAIL>']
key = open('../key', 'r').read()
message = MIMEMultipart()
message['From'] = sender
message['Subject'] = Header(filename, 'utf-8')
#message.attach(MIMEText(content, 'plain', 'utf-8'))
html = '<p>{}</p>'.format(content.replace('\n', '<br>'))
html += '<p><img src="cid:image1"></p>'
html += '<p><img src="cid:image2"></p>'
message.attach(MIMEText(html, 'html', 'utf-8'))
message.attach(addimg("total.jpg","image1"))
message.attach(addimg("chengqu.jpg","image2"))
att = MIMEText(open(filename, 'rb').read(), 'base64', 'utf-8')
att["Content-Type"] = 'application/octet-stream'
att_str = 'attachment; filename={}'.format(filename)
att["Content-Disposition"] = att_str
message.attach(att)
try:
smtpObj = smtplib.SMTP('smtp.qq.com')
smtpObj.login(sender, key)
smtpObj.sendmail(sender, receivers, message.as_string())
print("send email successfully.")
except smtplib.SMTPException:
print("send email failed.")
return
def get_tongji_plot(filename):
info = | pd.read_excel(filename, sheet_name="统计", index_col=0) | pandas.read_excel |
import fileinput
import pandas as pd
import numpy as np
import drep
import os
import shutil
import json
import re
from PyPDF2 import PdfFileReader
from .dprint import dprint
from .config import _globals
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 80)
TAGS = [tag + '_TAG' for tag in ['JSON', 'COLUMNS', 'FIGURES', 'WARNINGS', 'CMD']]
class HTMLBuilder():
def __init__(self, binnedContigs_l, dRep_cmd, dRep_workDir, html_dir):
self.binnedContigs_l = binnedContigs_l
self.dRep_cmd = dRep_cmd
self.dRep_workDir = dRep_workDir
self.html_dir = html_dir
self.html_path = os.path.join(html_dir, 'dRep_dereplicate_report.html')
self.figures_dir = os.path.join(html_dir, 'figures')
self.replacements = dict()
self._build()
def _build(self):
self._build_command()
self._build_summary()
self._build_figures()
self._build_warnings()
for line in fileinput.input(self.html_path, inplace=True):
line_stripped = line.strip()
if line_stripped in TAGS and line_stripped in self.replacements:
print(self.replacements[line_stripped])
else:
print(line, end='')
def _build_command(self):
self.replacements['CMD_TAG'] = self.dRep_cmd
def _build_summary(self):
'''Build data frame from dRep output'''
ignoreGenomeQuality = '--ignoreGenomeQuality' in self.dRep_cmd
###
### Initialize dataframe with names and NaNs
names = ['BinnedContigs Name', 'Bin Name', 'File Name']
attr = ['Length', 'N50', 'GC', 'Completeness', 'Contamination', 'Strain Heterogeneity']
preproc = ['Length Filtered']
res = ['CheckM Filtered', 'Prim/Sec Cluster', 'Dereplicated']
attr_chdb = ['Genome size (bp)', 'N50 (scaffolds)', 'GC', 'Completeness', 'Contamination', 'Strain heterogeneity'] # Chdb.csv header names corresponding to attr
if not ignoreGenomeQuality:
columns = names + attr[:3] + preproc + attr[3:] + res
else:
columns = names + attr[:3] + preproc + res[1:]
smmr = pd.DataFrame(columns=columns)
for binnedContigs in self.binnedContigs_l:
for bin_name in binnedContigs.original_bin_name_list:
smmr.loc[len(smmr)] = [binnedContigs.name, bin_name, binnedContigs.transform_bin_name(bin_name)] + [np.nan] * (len(columns) - len(names))
###
### Insert genome attribute info
smmr = smmr.set_index('File Name')
# read drep's data tables with the transformed bin/genome/file name as index
bdb = pd.read_csv(os.path.join(self.dRep_workDir, 'data_tables/Bdb.csv'), index_col='genome') # has all genomes that passed length then completeness/contamination filter
cdb = pd.read_csv(os.path.join(self.dRep_workDir, 'data_tables/Cdb.csv'), index_col='genome') # ''
wdb = pd.read_csv(os.path.join(self.dRep_workDir, 'data_tables/Wdb.csv'), index_col='genome') # has all genomes that passed length filter then completeness/contamination filter then dereplication
if not ignoreGenomeQuality:
chdb = pd.read_csv(os.path.join(self.dRep_workDir, 'data_tables/Chdb.csv'), index_col='Bin Id') # has all genomes that passed length filter
# get custom stats for each BinnedContigs and concat with file name as index
df_stats_l = []
for binnedContigs in self.binnedContigs_l:
df_stats = pd.DataFrame.from_dict(binnedContigs.stats['bin_stats'], orient='index')
df_stats_l.append(df_stats)
df_stats = | pd.concat(df_stats_l) | pandas.concat |
# column deletion using del operator and pop method of pandas dataframe
import pandas as pd
import numpy as np
d={'one': | pd.Series([1,2,3],index=['a','b','c']) | pandas.Series |
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from skimage.io import imread, imsave
from skimage.transform import resize
import pandas as pd
import os
from tqdm import tqdm
root_dir = "./birds"
save_dir = "./birds_preprocessed/"
IMG_SIZE = 64
# utility functions -> from STACKGAN birds pre-processing code
def cropper(img, bbox):
imsiz = img.shape
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
y1 = np.maximum(0, center_y - R)
y2 = np.minimum(imsiz[0], center_y + R)
x1 = np.maximum(0, center_x - R)
x2 = np.minimum(imsiz[1], center_x + R)
img_cropped = img[y1:y2, x1:x2, :]
return img_cropped
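# Note (added): bounding boxes come from CUB_200_2011/bounding_boxes.txt as
# (x, y, width, height); cropper() keeps a square of half-size R = 0.75 * max(width, height)
# around the box centre, clipped to the image, presumably before the resize to IMG_SIZE
# performed later in the script.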
# start code here
df_images = pd.read_csv(root_dir+"/CUB_200_2011/images.txt",header=None,
sep=" ",names=["id","image"])
df_train_ids = pd.read_csv(root_dir+"/CUB_200_2011/train_test_split.txt",header=None,
sep=" ", names=["id", "is_train"])
df_bbox = pd.read_csv(root_dir+"/CUB_200_2011/bounding_boxes.txt",header=None,
sep=" ", names=["id", "x", "y", "width", "height"]
).astype("int")
df = pd.merge(df_images, df_train_ids, on="id")
df = | pd.merge(df, df_bbox, on="id") | pandas.merge |
import hashlib
import pandas as pd
from pandas import DataFrame
from pathlib import Path
from struct import calcsize
from struct import unpack
from tqdm import tqdm
from jotdx.consts import MARKET_BJ
from jotdx.consts import MARKET_SH
from jotdx.consts import MARKET_SZ
from jotdx.logger import logger
def get_stock_markets(symbols=None):
results = []
assert isinstance(symbols, list), 'stock code need list type'
if isinstance(symbols, list):
for symbol in symbols:
results.append([get_stock_market(symbol, string=False), symbol.strip('sh').strip('sz')])
return results
def get_stock_market(symbol='', string=False):
""" 判断股票ID对应的证券市场匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '12','13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param string: False 返回市场ID,否则市场缩写名称
:param symbol: 股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'
"""
assert isinstance(symbol, str), 'stock code need str type'
market = 'sh'
if symbol.startswith(('sh', 'sz', 'SH', 'SZ')):
market = symbol[:2].lower()
elif symbol.startswith(('50', '51', '60', '68', '90', '110', '113', '132', '204')):
market = 'sh'
elif symbol.startswith(('00', '12', '13', '18', '15', '16', '18', '20', '30', '39', '115', '1318')):
market = 'sz'
elif symbol.startswith(('5', '6', '9', '7')):
market = 'sh'
elif symbol.startswith(('4', '8')):
market = 'bj'
if string is False:
if market == 'sh':
market = MARKET_SH
if market == 'sz':
market = MARKET_SZ
if market == 'bj':
market = MARKET_BJ
logger.debug(f'market=>{market}')
return market
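# Examples (added, illustrative): get_stock_market('600000') -> MARKET_SH,
# get_stock_market('000001', string=True) -> 'sz', get_stock_market('sz000001') -> MARKET_SZ;
# the numeric market constants themselves are defined in jotdx.consts.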
def gpcw(filepath):
cw_file = open(filepath, 'rb')
header_size = calcsize('<3h1H3L')
stock_item_size = calcsize('<6s1c1L')
data_header = cw_file.read(header_size)
stock_header = unpack('<3h1H3L', data_header)
max_count = stock_header[3]
for idx in range(0, max_count):
cw_file.seek(header_size + idx * calcsize('<6s1c1L'))
si = cw_file.read(stock_item_size)
stock_item = unpack('<6s1c1L', si)
code = stock_item[0].decode()
foa = stock_item[2]
cw_file.seek(foa)
info_data = cw_file.read(calcsize('<264f'))
cw_info = unpack('<264f', info_data)
logger.debug(f'{code}, {cw_info}')
return code, cw_info
def md5sum(downfile):
"""
Compute the MD5 hash of a file
:param downfile: path to the file
:return: hex digest string, or None if the file cannot be read
"""
try:
md5_l = hashlib.md5()
md5_l.update(Path(downfile).read_bytes())
return md5_l.hexdigest()
except (IOError, FileNotFoundError) as e:
logger.error(f'无法读取文件: {downfile}')
logger.debug(e)
return None
def to_data(v, **kwargs):
"""
Convert a value (list / dict / DataFrame) to a pd.DataFrame
:param v: mixed
:return: pd.DataFrame
"""
symbol = kwargs.get('symbol')
adjust = kwargs.get('adjust', None)
client = kwargs.get('client', None)
if adjust in ['01', 'qfq', 'before']:
adjust = 'qfq'
elif adjust in ['02', 'hfq', 'after']:
adjust = 'hfq'
else:
adjust = None
# Empty value
if not v:
return pd.DataFrame(data=[])
# DataFrame
if isinstance(v, DataFrame):
result = v
# List
elif isinstance(v, list):
result = pd.DataFrame(data=v) if len(v) else None
# Dict
elif isinstance(v, dict):
result = | pd.DataFrame(data=[v]) | pandas.DataFrame |
import numpy
import matplotlib.pyplot as plt
import pandas
from pandas import DataFrame
import math
import yfinance as yf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
import sys
import os
import time
import random
import requests
from datetime import datetime
import hashlib
import hmac
from urllib.parse import urlparse
import json
#GETS NEW DATA FOR BTC PRICE FROM YAHOO FINANCE
crypto = "BTC-USD"
#crypto = "ETH-USD"
btc = yf.Ticker(crypto)
history = btc.history(period='1mo',interval="90m")
history.to_csv('out.csv')
#tickerdata = pandas.read_csv('BTCUSDT.csv')
#tickerdata = DataFrame(tickerdata)
#print(tickerdata.values[1])
#TESTING
futuretime = 1
def predictdata(tickerdata):
global futuretime
scaler = MinMaxScaler(feature_range = (0,1))
scaler1 = MinMaxScaler(feature_range = (0,1))
#i = 0
dataset = DataFrame()
dataset1 = DataFrame()
for i in range(1,len(tickerdata)):
if i <= int(len(tickerdata) * 0.6):
dataset = dataset.append(tickerdata.iloc[i])
if i >= int(len(tickerdata) * 0.6):
dataset1 = dataset1.append(tickerdata.iloc[i])
#file = open("1minprices.txt","r")
#newdata = file.readlines()
#file.close()
#for item in newdata:
dataset = DataFrame(dataset)
dataset = scaler.fit_transform(dataset)
dataset1 = DataFrame(dataset1)
dataset1 = scaler1.fit_transform(dataset1)
#PRINTS REAL DATA FOR COMPARISON
print(dataset1[0])
#plt.plot(dataset)
#plt.plot(dataset1)
#plt.show()
#INITIATES NETWORK
mind = Sequential()
trainx = []
trainy = []
testx = []
testy = []
#AMOUNT OF TIME ALGO SHOULD SEE IN THE PAST
#(IF DATA IS 1 DAY DATA, THEN 1 TIME STEP = 1 DAY)
timesteps = 30
#ADDS ITEMS TO TRAINING DATASET
for i in range(timesteps, len(dataset)):
trainx.append(dataset[i-timesteps:i, :])
trainy.append(dataset[i])
trainx = numpy.array(trainx)
trainy = numpy.array(trainy)
#ADDS ITEMS TO TEST DATASET
for i in range(timesteps, len(dataset1)):
testx.append(dataset1[i-timesteps:i, :])
testy.append(dataset1[i])
testx = numpy.array(testx)
testy = numpy.array(testy)
print(trainx.shape)
#BUILDS AND COMPILES MODEL
mind.add(LSTM(50, return_sequences=True,input_shape=(trainx.shape[1], trainx.shape[2]) ))
mind.add(Dropout(0.6))
mind.add(LSTM(50, return_sequences=True ))
mind.add(Dropout(0.6))
mind.add(LSTM(50, return_sequences=True ))
mind.add(Dropout(0.6))
mind.add(LSTM(50))
mind.add(Dropout(0.6))
mind.add(Dense(1,activation='linear'))
mind.compile(loss='mean_squared_error', optimizer='adam')
os.system('cls')
#SAVE WEIGHTS
#cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,save_weights_only=True,verbose=1)
#TRAINS ALGO
mind.fit(trainx, trainy, epochs=5, batch_size=60)#,callbacks=[cp_callback]
os.system('cls')
#FEED IN TESTX (60 timesteps or days)
#FOR LOOP THAT FEEDS PREDICTED NEW DATA BACK INTO DATASET
#TO GET THE PREDICTED FORCAST
datasettemp = dataset1
for i in range(futuretime):
trainprediction = mind.predict(testx)
testx = []
datasettemp = numpy.append(datasettemp,trainprediction[int(len(trainprediction) - 1)][0])
datasettemp = datasettemp.reshape(datasettemp.shape[0], 1)
print("Predicted Price: "+str(datasettemp[ int(len(datasettemp)-1) ]))
for i in range(timesteps, len(datasettemp)):
testx.append(datasettemp[i-timesteps:i, :])
testx = numpy.array(testx)
#CONVERTS STANDARDIZED DATA TO NORMAL DATA
trainprediction = scaler1.inverse_transform(trainprediction)
datasettocompare = scaler1.inverse_transform(dataset1)
return trainprediction, datasettocompare
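# Illustrative usage (sketch): commitpredict further below builds the single-column frame that
# predictdata expects; an equivalent direct call would be something like
# predicted, actual = predictdata(pandas.read_csv('out.csv', usecols=['Close']))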
#COMPARES TODAY'S ESTIMATED PRICE AND X DAY'S PREDICTED PRICE TO GET
#PREDICTED PRICE MOVEMENT
#BUY AND SELL API
#30 BTCUSD = 1 BTCUSDT
def generate_signature(secret, http_method, url, expires, data):
# parse relative path
parsedURL = urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
if isinstance(data, (bytes, bytearray)):
data = data.decode('utf8')
print("Computing HMAC: %s" % http_method + path + str(expires) + data)
message = http_method + path + str(expires) + data
signature = hmac.new(bytes(secret, 'utf8'), bytes(message, 'utf8'), digestmod=hashlib.sha256).hexdigest()
return signature
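# Sketch of how generate_signature is used below: only the path+query of the URL, the expiry
# and the (possibly empty) body are signed; the values here are illustrative only.
# expires = int(round(datetime.now().timestamp()) + 5)
# sig = generate_signature(apisecret, 'GET',
#                          'https://api.basefex.com/orders/count?status=NEW&side=BUY&symbol=BTCUSDT',
#                          expires, '')
# hed = {'api-expires': str(expires), 'api-key': apikey, 'api-signature': sig}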
file = open("api.txt","r")
keys = file.read()
file.close()
apikey = keys.split(":")[0].strip().replace("\n","").replace("\r","")
apisecret = keys.split(":")[1].strip().replace("\n","").replace("\r","")
def cancelorder(theid):
try:
global apikey
global apisecret
for _ in range(3):
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'DELETE',str("https://api.basefex.com/orders/")+str(theid),expires, '')
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
requests.delete(str("https://api.basefex.com/orders/")+str(theid), headers=hed)
time.sleep(1)
except:
print("Random error, trying again")
def getopentrades(symbol, status, side):
try:
global apikey
global apisecret
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'GET',str('/orders/count?status='+str(status)+'&side='+str(side)+'&symbol='+str(symbol)),expires, '')
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
response = requests.get("https://api.basefex.com/orders/count?status="+str(status)+"&side="+str(side)+"&symbol="+str(symbol), headers=hed)
print(response.text)
orders = str(str(response.text).split('"count":')[1].split("}")[0].strip())
orders = int(orders)
return orders
except:
print("Random error, trying again")
def tradesopen(symbol, side,previousamount):
try:
newamount = getopentrades(symbol,"FILLED",side)
tradeson = None
if newamount > previousamount:
tradeson = True
else:
tradeson = False
return tradeson
except:
print("Random error, trying again")
def tradesnew(symbol, side,previousamount):
try:
newamount = getopentrades(symbol,"NEW",side)
tradeson = None
if newamount < previousamount or int(newamount) == 0:
tradeson = True
else:
tradeson = False
return tradeson
except:
print("Random error, trying again")
def long(symbol,amount, price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'LIMIT',
'side':'BUY',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
response = requests.post("https://api.basefex.com/orders", json=args, headers=hed)
response = response.text
print(response)
time.sleep(3)
numnewbuy = getopentrades(symbol,"NEW","BUY")
numnewsell = getopentrades(symbol,"NEW","SELL")
theid = str(str(response).split('"id":"')[1].split('",')[0].strip().replace("\r","").replace("\n",""))
for _ in range(3):
try:
time.sleep(2)
print("Checking for trade finished")
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
print("long pos: Amount: "+str(amount)+" Symbol: "+str(symbol)+" Price: "+str(price))
return True
except:
print("Error longing, trying again")
time.sleep(3)
for _ in range(10):
try:
print("Error placing order in time. Cancelling")
#Last check before cancelling
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
cancelorder(theid)
for _ in range(3):
time.sleep(2)
print("Checking for trade cancelled")
if tradesnew(symbol,"BUY",numnewbuy) == True and tradesnew(symbol,"SELL",numnewsell) == True:
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
print("Successfully cancelled trade")
return False
except:
print("Error cancelling, trying again")
except:
print("Random error, trying again")
def short(symbol,amount,price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'LIMIT',
'side':'SELL',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
response = requests.post("https://api.basefex.com/orders", json=args, headers=hed)
response = response.text
print(response)
time.sleep(3)
numnewbuy = getopentrades(symbol,"NEW","BUY")
numnewsell = getopentrades(symbol,"NEW","SELL")
theid = str(str(response).split('"id":"')[1].split('",')[0].strip().replace("\r","").replace("\n",""))
for _ in range(3):
try:
time.sleep(2)
print("Checking for trade finished")
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
print("short pos: Amount: "+str(amount)+" Symbol: "+str(symbol)+" Price: "+str(price))
return True
except:
print("Error shorting, trying again")
time.sleep(3)
for _ in range(10):
try:
print("Error placing order in time. Cancelling")
#Last check before cancelling
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
cancelorder(theid)
for _ in range(3):
time.sleep(2)
print("Checking for trade cancelled")
if tradesnew(symbol,"BUY",numnewbuy) == True and tradesnew(symbol,"SELL",numnewsell) == True:
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
print("Successfully cancelled trade")
return False
except:
print("Error cancelling, trying again")
except:
print("Random error, trying again")
def closelong(symbol,amount, price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'MARKET',
'side':'SELL',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
requests.post("https://api.basefex.com/orders", json=args, headers=hed)
return True
except:
print("Random error, trying again")
def closeshort(symbol,amount,price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'MARKET',
'side':'BUY',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
requests.post("https://api.basefex.com/orders", json=args, headers=hed)
return True
except:
print("Random error, trying again")
def getmarketprice(contract):
global apikey
global apisecret
for _ in range(5):
try:
response = requests.get("https://api.basefex.com/instruments/prices")
price = str(str(response.text).split(contract)[1].split(',"price":')[1].split(".")[0])
return int(price)
except Exception as WW:
print("Exception with market price: "+str(WW))
#PREDICTS DATA
def commitpredict(col):
tickerdata = pandas.read_csv('out.csv',usecols=[col,])
tickerdata = | DataFrame(tickerdata) | pandas.DataFrame |
import pandas as pd
import scipy
import numpy as np
import seaborn as sns
import matplotlib as mpl
#from sinaplot import sinaplot
import scanpy as sc
from matplotlib import pyplot as plt
import scanpy.external as sce
import os
import scipy.spatial.distance
cwd = os.getcwd()
print(cwd)
def getLineagesFromChangeo(changeodb, print_summary):
"""subsets the changeo_db output by bracer by only those cells which are within lineages (non singletons)"""
df = changeodb
_df = df[df.CLONE != "None"] # get rid of unassigned cells (no BCR reconstructed)
_df = (_df.CLONE.value_counts() > 1) #find clones with more than 1 member (excluding the 'None' pseudo-clone)
if print_summary == True:
print( "There are", len(_df[_df == 1]), "lineages with more than one member")
CHANGEO_confidentlineages = df[df.CLONE.isin(_df[_df == 1].index)].sort_values('CLONE')
CHANGEO_confidentlineages = CHANGEO_confidentlineages[CHANGEO_confidentlineages.CLONE != 'None']
if print_summary == True:
print("number of cells in original dataframe", df.shape[0])
print("number of distinct Clones in original dataframe", df.drop_duplicates('CLONE').shape[0] -1) #subtract 1 for the 'None' entry
print(CHANGEO_confidentlineages.shape[0]/df.shape[0], 'percent of cells in a lineage' )
return CHANGEO_confidentlineages
# calculate distance metric
def calculate_distance_metric(_clonal_comparison, df):
# iterate, i know this is bad style..
for index, row in _clonal_comparison.iterrows():
# Cell to cell comparison
cell1 = row['cell1']
cell2 = row['cell2']
# df.loc is an array that you make
_clonal_comparison.loc[index, 'distance'] = scipy.spatial.distance.euclidean(df.loc[cell2, :].values, df.loc[cell1, :].values)
return _clonal_comparison
# get lineage correlations for all groups
def calculateLineageCorrelations(counts_table, changeo_db, method):
""" returns within lineage, amongst lineage, amongst all cells
ie a list or series of correlations between different groupings of cells (all, sisters, within lineage etc.)"""
if method == 'euclidean_distance':
method = scipy.spatial.distance.euclidean
lineage, within_lineage = calculateCorrelationsWithinLineage(changeo_db, counts_table, method)
#TODO seamlessly incorporate the print summary logic (maybe i don't really even need it at this point)
amongst_lineage = calculateCorrelations(counts_table[counts_table.index.isin(getLineagesFromChangeo(changeo_db, print_summary=False).CELL)],
method)
amongst_all_cells = calculateCorrelations(counts_table, method)
return within_lineage, amongst_lineage, amongst_all_cells
# lineage correlation functions
def calculateCorrelations(df, method):
# create empty list
correlations_list = []
#transform matrix to take pearson correllation
_pearsonCorrAll = df.T.corr(method=method)
for i in range(len(_pearsonCorrAll)):
correlations_list += list(_pearsonCorrAll.iloc[(i+1):,i])
correlations = pd.Series(correlations_list).dropna()
return correlations
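# Illustrative usage (sketch): pairwise correlations between all cells of a cell-by-gene
# counts table (rows = cells); 'method' is passed straight to pandas DataFrame.corr, so
# 'pearson', 'spearman', 'kendall' or a callable all work here.
# all_pairs = calculateCorrelations(gene_counts_df, 'spearman')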
def calculateCorrelationsWithinLineage(changeodb, counts_table, method):
"""idf is the dataframe used to index should be the changeo df, or minimally df with cell name and its clone information in columns,
counts_table is the cell by gene counts table from which the vectors of comparison come"""
idf = changeodb
lineage_ziplist = []
correlations = []
#iterate through each unique lineage ID
for clone in idf.CLONE.unique():
#get series of each cell in the lineage
series_of_cells = idf.CELL[idf.CLONE == clone]
#if the lineage is larger than 2 do the comparison
if len(series_of_cells.unique()) < 2:
pass
else:
#print(clone)
lineage_counts_table = counts_table[counts_table.index.isin(series_of_cells)]
correlation_matrix = lineage_counts_table.T.corr(method=method)
## subset array where no duplicate comparisons and no 1's
mask = np.zeros_like(correlation_matrix, dtype=bool)  # np.bool is removed in recent numpy; plain bool is equivalent
mask[np.triu_indices_from(mask)] = True
drop_dup = correlation_matrix.where(mask)
drop_dup = drop_dup.values.flatten()
drop_dup = drop_dup[drop_dup != 1]
drop_dup = drop_dup[~np.isnan(drop_dup)] #not clear when and why I was getting nan's
#it may be when I filter the gene counts data frame for good cells some of the clones don't make it
correlations.extend(list(drop_dup))
lineage_ziplist.append(clone)
return lineage_ziplist, correlations
def makeHumanReadableSJTable(df_IGH, exon_start_dict):
"""Adds columns to the SJout file which aid in interpreting the various splice junctions accepts and SJout formatted file and a dictionary which maps coordinates to starting exons
Returns: SJout dataframes with extra columns, 1 df for the ab transcription (J spliced to downstream constant region) and 1 with just the switch transcripts defined as transcripts where the splice donor to exon1 is not a J gene """
all_exon_starts = exon_start_dict
_constant_regionspliceacceptors = df_IGH.copy()
_J_genes_start_dict = {105865406: "IGHJ1", 105863197: "IGHJ6", 105864586: 'IGHJ3', 105865198: 'IGHJ2', 105864214: 'IGHJ4', 105863813: 'IGHJ5'}
_constant_regionspliceacceptors['J_exon'] = _constant_regionspliceacceptors['end']
_constant_regionspliceacceptors['J_exon'] = _constant_regionspliceacceptors['J_exon'].map(_J_genes_start_dict).fillna(_constant_regionspliceacceptors.end)
# adding exon column
_constant_regionspliceacceptors['exon_start'] = _constant_regionspliceacceptors['start'].replace(all_exon_starts)
# filter by more than 2 unique mapping reads
_constant_regionspliceacceptors = _constant_regionspliceacceptors[_constant_regionspliceacceptors.unique_mapping > 2]
_constant_regionspliceacceptors = _constant_regionspliceacceptors.fillna('none')
#filter by more than 12 overhang (gets rid of E mapping artefact which TODO I should investigate)
#cast the column to string
_constant_regionspliceacceptors['exon_start'] = _constant_regionspliceacceptors["exon_start"].astype(str)
# splices to the J genes
_J_tx_df = _constant_regionspliceacceptors[_constant_regionspliceacceptors['J_exon'].isin(_J_genes_start_dict.values())]
#take only the first exon of each constant region, this way we know we are counting switch transcripts
#filter out the splicing counts coming from the J genes to the first exon
switch_tx_df = _constant_regionspliceacceptors[(_constant_regionspliceacceptors.exon_start.str.contains('exon1'))
& (_constant_regionspliceacceptors.end < 105863197) # splice donor comes downstream of last J gene
& (~_constant_regionspliceacceptors.exon_start.str.contains('3prime'))] #get rid of odd but possibly interesting splicing where the acceptor is the 3prime? This may be related to backwards recombination or antisense transcription?
#filter out where J is the acceptor of the splice (possibly interesting but not interested right now)
_J_tx_df = _J_tx_df[~_J_tx_df.exon_start.str.contains("IGHJ")]
_J_tx_df['exon_simple'] = _J_tx_df['exon_start'].str.split('_', expand = True)[0]
return _J_tx_df, switch_tx_df
def addMetadataToAnnData(adata, changeodb_H, ab_tx):
_adata = adata.copy()
#_adata.obs = _adata.obs.reset_index()
#split cell names which includes superfluous information about the Donor ID
cell_names = _adata.obs.index.str[:-2]
#set loom index to cell names
_adata.obs.index = cell_names
#Call Isotypes
df_isotype_calls, x = callIsotypeByBracerSJout(changeodb_H, ab_tx, plot = False)
#select most abundant isotype
df_isotype_calls = df_isotype_calls.sort_values('unique_mapping',
ascending=False).drop_duplicates(subset='cell')
#make a cell column for merging dataframes
_adata.obs['cell'] = _adata.obs.index
#perform the merge to get isotype information in scanpy
_adata.obs = pd.merge(_adata.obs, df_isotype_calls, left_on='cell', right_on='cell', how='left')
# set index to cell
_adata.obs.set_index('cell', inplace=True)
#drop "cell" name from the index in df
_adata.obs.index.name = None
# make new changeodb_H column called cell to facilitate merging
changeodb_H['cell'] = changeodb_H.loc[:,'CELL']
#Filter to only functional assemblies
changeodb_H = changeodb_H[changeodb_H.FUNCTIONAL == True]
# Drop any duplicates which would be possible doublets
changeodb_H = changeodb_H.drop_duplicates(subset='cell')
#perform the same merging dance as before
_adata.obs['cell'] = _adata.obs.index
_adata.obs = pd.merge(_adata.obs, changeodb_H, left_on= 'cell', right_on = 'cell', how = 'left')
_adata.obs.set_index('cell', inplace = True)
_adata.obs.index.name = None
#_adata.obs = _adata.obs.fillna('no_assembly')
return _adata
def loadSJoutIGH(filename, metadata):
df_sjout = pd.read_feather(filename)
#filter dataframe to just the IGH locus
print("filtering SJout to just IGH locus")
df_IGH = df_sjout[(df_sjout['end'] > 105550000)]
#load metadata about constant region exon coordinates and I exons
df_exoncoordinates = pd.read_csv(metadata, header = None, names = ['exon', 'coordinate'] )
#Apply STAR vs. ENSMBL indexing correction
df_exoncoordinates.loc[~df_exoncoordinates.exon.str.contains('exon1'), 'coordinate'] = df_exoncoordinates.coordinate+1
df_exoncoordinates.loc[df_exoncoordinates.exon.str.contains('IGHG3_exon1'), 'coordinate'] = df_exoncoordinates.coordinate+1
#load metadata about constant region exon coordinates and I exons
#df_exoncoordinates = pd.read_csv(metadata, header = None, names = ['exon', 'coordinate'] )
zipper = zip(df_exoncoordinates.coordinate, df_exoncoordinates.exon)
all_exon_starts = dict(list(zipper))
print("making SJTable human readable")
ab_tx , switch_tx = makeHumanReadableSJTable(df_IGH, all_exon_starts)
return ab_tx, switch_tx
def loadChangeoDbH(filepath):
changeo_db = pd.read_csv(filepath, index_col = 0)
changeo_db = changeo_db[changeo_db.LOCUS == 'H']
changeo_db["CLONE"] = changeo_db['MERGE_CLONE'] #MERGE CLONE column was created in data combination process to make the column unique
return changeo_db
def loadData(SJout, loom_data, gene_counts, changeo_db):
ab_tx, switch_tx = loadSJoutIGH(SJout)
###
# Assemblies (Changeo)
changeo_db_H = loadChangeoDbH(changeo_db)
###
# Gene counts
###
print("loading anndata")
adata = 'placeholder'
loom_adata = sc.read_loom(loom_data)
adata = sc.read_h5ad(gene_counts)
return ab_tx, switch_tx, adata, loom_adata, changeo_db_H
def preprocessGeneCounts(gene_counts, num_counted_reads):
print("filtering cells with less than", str(num_counted_reads), "counted reads")
gene_counts_filtered = gene_counts[gene_counts.sum(axis=1) > num_counted_reads] # cell have more than 30000 mapped reads
print("normalizing to counts per million")
_CPM = gene_counts_filtered.div(gene_counts_filtered.sum(axis = 1), axis=0) * 1e6 # counts per million
print("log base 2 transforming")
_logCPM = np.log2(_CPM + 1) #log base 2
return _logCPM
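# Worked example of the transform above (illustrative numbers): a cell with 2,000,000 counted
# reads and a gene with 200 counts gives CPM = 200 / 2e6 * 1e6 = 100, and the reported value
# is log2(100 + 1) ~= 6.66.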
def preprocessScanpy(adata, num_counted_reads, num_genes, min_cells, n_neighbors, num_highly_variable):
""" returns a new adata object that has been processed """
_adata = adata.copy()
_adata.var_names_make_unique()
print("making var_names unique")
print("filtering cells with less than", num_counted_reads, "counted reads")
sc.pp.filter_cells(_adata,min_counts=num_counted_reads)
print("filtering cells with less than", num_genes, "genes detected")
sc.pp.filter_cells(_adata,min_genes=num_genes)
print("filtering genes detected in less than", min_cells)
sc.pp.filter_genes(_adata,min_cells=min_cells)
sc.pp.calculate_qc_metrics(_adata, inplace = True)
print("normalizing by total counts per cell")
sc.pp.normalize_total(_adata, exclude_highly_expressed=True)
print("log transforming data")
sc.pp.log1p(_adata, base=10)
_adata.raw = _adata
# Remove ERCCs which could drive clustering based on which batch was used or whether or not they were spiked in
ERCCs = _adata.var.index[_adata.var.index.str.contains("ERCC-")].to_list()
_adata = _adata[:, ~_adata.var.index.isin(ERCCs)]
print("removed ERCC sequences from genes to cluster on")
# Remove Immune Receptor Genes which could drive clustering
immune_receptors = pd.read_csv('/home/mswift/B_cells/CSR/sc_RNAseq/data_tables/metadata/immune_receptor_genes_keepConstantRegion.csv', index_col=0)
immune_receptors.columns = ['genes']
print("removing variable immune receptor genes which may drive clustering")
_adata = _adata[:, ~_adata.var.index.isin(immune_receptors.genes)]
print("calculating highly variable genes")
sc.pp.highly_variable_genes(_adata, n_top_genes = num_highly_variable)
sc.pp.scale(_adata)
print("calculating PCA")
sc.pp.pca(_adata)
neighbors = n_neighbors
print("creating neighbors graph with", n_neighbors)
## TODO some batch Correction?
sc.pp.neighbors(_adata, n_neighbors=neighbors)
print('umapping and tsne-ing')
sc.tl.umap(_adata)
sc.tl.tsne(_adata)
return _adata
def plotPointPlotLocus(IGH_locus_df, cell_list, color):
"""makes a point plot of the IGH locus where reach observation is a cell and an observation consists
of counts for each of the genes at the IgH locus"""
sns.set(style = "whitegrid", context = 'paper')
IGH_locus_df = IGH_locus_df[IGH_locus_df.index.isin(cell_list)]
## Point Plot individual clones
#Data munging
point_plot_df = IGH_locus_df
point_plot_df = point_plot_df.reset_index()
point_plot_df = pd.melt(point_plot_df, value_vars = point_plot_df.columns[1:], id_vars = 'index')
point_plot_df.columns = ['cell', 'exon', 'log CPM']
# plotting
fig, ax = plt.subplots(1, 1, figsize=(6,3))
sns.set_palette(color)
sns.pointplot(data = point_plot_df,
x = 'exon', y ='log CPM', hue='cell', dodge =.2,
join = True, legend=None, alpha = 0.5)
ax.set_ylabel("log$_2$ CPM")
ax.set_xlabel("")
ax.legend_.remove()
sns.pointplot(data = point_plot_df,
x = 'exon', y ='log CPM',
join = True, linestyles = "--", color = 'k')
sns.despine()
ax.set_ylabel("log$_2$ CPM")
ax.set_xlabel("")
sns.set_style("whitegrid", {'axes.grid' : False})
return fig, ax
## Scanpy helper functions:
def getCellByGeneMatrix(adata):
"""input: adata
return: df cell by gene"""
# get matrix
_x = pd.DataFrame(adata.X)
# add column names (genes)
_x.columns = adata.var_names
# add row names (cells)
_x = _x.set_index(adata.obs_names)
return _x
def getEmbeddingCoordinates(adata, embedding):
#filtering for good cells
embedding_key = 'X_' + embedding
_df_embedding = sc.get.obs_df(adata, obsm_keys = [(embedding_key, 0), (embedding_key, 1)])
return _df_embedding
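# Illustrative usage (sketch; exact column names depend on the scanpy version):
# umap_df = getEmbeddingCoordinates(adata, 'umap')   # typically columns 'X_umap-0', 'X_umap-1'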
def plotClonalCorrelations(gene_counts, changeodb_H, method):
""" Accepts gene counts data frame with cells as rows
and genes of interest as columns, Changeodb, and the method """
within_lineage, amongst_lineage, amongst_all_cells = calculateLineageCorrelations(gene_counts, changeodb_H, method)
hist = plt.grid()
if method == 'euclidean_distance':
bins = np.linspace(0,40,30)
else:
bins = np.linspace(-1.1,1.1, 30)
plt.hist(amongst_lineage, bins=bins, color = 'blue', label='Unrelated Pairs', density=True, histtype='stepfilled', alpha=0.5)
#plt.hist(amongst_lineage, bins=bins, color = 'blue', label='Unrelated Pairs', density=True, histtype='step')
# Pearson correlation hist for all pairs
# Pearson correlation hist for related pairs
plt.hist(within_lineage, bins=bins, color = 'red', label='Related Pairs', density=True, histtype='stepfilled', alpha = 0.5)
#plt.hist(within_lineage, bins=bins, color = 'red', label='Related Pairs', density=True, histtype='step')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel(method)
plt.ylabel('Density')
print("Mean Pearson correlation amongst all cells in grey:",amongst_all_cells.mean())
print("Mean Pearson correlation within lineages in red:", pd.Series(within_lineage).mean(), 'consisting of', pd.Series(within_lineage).shape[0], "comparisons")
print("Mean Pearson correlation amongst all lineages in blue:", (pd.Series(amongst_lineage).mean()), 'consisting of', pd.Series(amongst_lineage).shape[0], "comparisons")
print(scipy.stats.ks_2samp(within_lineage, amongst_lineage), 'KS-test result comparing amongst all lineages to within lineages')
def plotLocusHeatmap(sj_out_df, isotype_call_df, row_colorby):
"""plots a clustermap using seaborn an sjout file and the isotype calls, rows can be colored by "condition" or by "isotype" """
sj_out_df = sj_out_df[sj_out_df.cell.isin(isotype_call_df.cell)]
#collects all switch transcripts to each constant region exon even if they come from different I-exon coordinates
sj_out_df.loc[:,'cell'] = sj_out_df.loc[:,'cell'].str.replace('-','_')
sj_out_df = sj_out_df[sj_out_df.exon_start.str.contains('exon1')]
sum_df = sj_out_df.groupby(['cell', 'exon_start']).sum()
# log transform the uniquely mapping reads
sum_df['unique_mapping_log2'] = np.log2(sum_df['unique_mapping'])
#put data in long form
df = sum_df.unique_mapping_log2.unstack().fillna(0)
# make a condition column (for a color bar)
_df = df.copy()
_df['condition'], _df['cell'] = _df.index.str.split('_', 1).str
# map color dictionary to condition column
if row_colorby == 'condition':
# hard coded condition color bar based on plate names from the cluster
cultureConditionDict = {'P1': 'firebrick', 'D2': 'firebrick', 'MS': 'firebrick', 'CTY':'firebrick', 'IL6811':'firebrick', 'NaiveBcellsplate1':'midnightblue', 'NaiveBcellsplate2':'midnightblue', 'PASL':'green', 'AgSeqPooled': 'midnightblue'}
row_colors = _df.condition.map(cultureConditionDict)
#for isotype coloring but could be about something else if necessary
#TODO: still some Nans in the row_colors that I'm unsure of where they are coming from
else:
_df["CELL"] = _df['condition'] + "_" + _df['cell']
isotypeColorDict = dict(zip(isotype_call_df['cell'], isotype_call_df['node_color']))
row_colors = _df.CELL.map(isotypeColorDict)
#plot correlogram
# plot
return sns.clustermap(df, row_colors=row_colors), _df
def callIsotypeBySJout(ab_tx, plot):
#slow for large datasets
# other approaches could be better
# e.g https://stackoverflow.com/questions/15705630/get-the-rows-which-have-the-max-value-in-groups-using-groupby
_df = ab_tx.copy(deep=True)
# Parse Exon Start column
# find the J to 1st constant region junction with max support (max unique mapping reads), this could be decreasing sensitivity by not calling cells in the act
idx = _df.groupby(['cell'], sort=False)['unique_mapping'].transform(max) == _df['unique_mapping']
#filter sjout df by this
isotype_calls_df = _df[idx]
if plot == True:
f, ax = plt.subplots(figsize=(6, 15))
ax = sns.barplot(data=isotype_calls_df.exon_simple.value_counts().to_frame('counts').reset_index(), y = 'index', x = 'counts')
ax.set(xlabel="Isotypes called by splice junctions")
ax.set()
isotype_calls_df.loc[:,'ISOTYPE_by_splice'] = isotype_calls_df.loc[:,'exon_start'].str.split('_', expand=True)[0]
isotype_list = ['None', 'IGHM', 'IGHD', 'IGHG3', 'IGHDM', 'IGHG1', 'IGHA2', 'IGHG2', 'IGHG4', 'IGHE', 'IGHA1', 'nan']
color_list = ['grey', 'green', 'green', 'red', 'green', 'black', 'blue', 'magenta', 'pink','cyan', 'blue', 'grey']
color_isotype_dict = dict(zip(isotype_list, color_list))
isotype_calls_df.loc[:,'node_color']= isotype_calls_df.ISOTYPE_by_splice.map(color_isotype_dict)
return isotype_calls_df
def callIsotypeByBracer(changeodb):
changeodb_H = changeodb[changeodb.LOCUS == 'H']
f, ax = plt.subplots(figsize=(6, 15))
ax = sns.barplot(data=changeodb_H.ISOTYPE.value_counts().to_frame('counts').reset_index(), y = 'index', x ='counts')
ax.set(xlabel="Isotypes called by BrACER assembly")
ax.set()
return changeodb_H
def callIsotypeByBracerSJout(changeodb, ab_tx, plot):
dfBracer = changeodb[changeodb.LOCUS == 'H'].copy()
#only take functional Bracer assemblies (i.e. no stop codons thus the principal igh antibody being made)
dfBracer = dfBracer[dfBracer.FUNCTIONAL == True]
sj_out_df = ab_tx.copy()
#make "CELL" column to facilitate merge with "cell"
sj_out_df["CELL"] = sj_out_df.loc[:,"cell"]
merged_df = pd.merge(dfBracer, sj_out_df, how='inner', on='CELL')
#Take the the splice Junction calls where the J gene matches the J gene from the assembly
_df = merged_df[merged_df.J_CALL == merged_df.J_exon][['J_exon', 'cell', 'ISOTYPE', 'exon_start', 'unique_mapping']]
# find the J to 1st constant region junction with max support (max unique mapping reads)
idx = _df.groupby(['cell'], sort=False)['unique_mapping'].transform(max) == _df['unique_mapping']
#filter sjout df by this
isotype_calls_df = _df[idx]
#Plot Bar plot of relative abundances of each isotype
if plot == True:
f, ax = plt.subplots(figsize=(6, 15))
ax = sns.barplot(data=isotype_calls_df.exon_start.value_counts().to_frame('counts').reset_index(), y = 'index', x = 'counts')
ax.set(xlabel="Isotypes called by combined Bracer and SJ.out")
ax.set()
else:
ax = None
# Simplify the exon_start column for plotting/display purposes
isotype_calls_df.loc[:,'ISOTYPE_by_splice'] = isotype_calls_df.loc[:,'exon_start'].str.split('_', expand=True)[0]
isotype_list = ['None', 'IGHM', 'IGHD', 'IGHG3', 'IGHDM', 'IGHG1', 'IGHA2', 'IGHG2', 'IGHG4', 'IGHE', 'IGHA1', 'nan']
color_list = ['grey', 'green', 'green', 'red', 'green', 'black', 'blue', 'magenta', 'pink','cyan', 'blue', 'grey']
color_isotype_dict = dict(zip(isotype_list, color_list))
isotype_calls_df.loc[:,'node_color']= isotype_calls_df.ISOTYPE_by_splice.map(color_isotype_dict)
return isotype_calls_df, ax
def findActofSwitchingCells(ab_sjoutdf, threshold, changeodb_H, plot):
"""ab_sjoutdf is the df filtered for J gene splices, changeo_H is
the changeo_db from bracer summarise, and threshold defines the ratio of
max counts supporting the major J to C splice junction divided by the sum of all J to C splice junctions
returns an SJout like df with only cells putatively in the act"""
#get rid of cells expressing IGHD(the dual expression of M and D confounds my filters)
_df = ab_sjoutdf[~ab_sjoutdf.exon_start.str.contains("IGHD")].copy()
_df = _df[_df.exon_start.str.contains("exon1")]
#divide the max unique mapping J to C splicing junction by the sum of all J to C splicing junctions
_x = _df.groupby('cell').max()/_df.groupby('cell').sum()
# cells in the act will have a number less than 1 for this value
act_cells = _x[_x.unique_mapping < threshold].index
#cells possibly in the act because they have multiple constant regions to J gene splicing events
_act_cells_df = _df[_df.cell.isin(act_cells)]
#cross reference to the assembly in order to use only the productive H chain transcript
changeo_merge = changeodb_H[['CELL', 'J_CALL', 'IN_FRAME', 'FUNCTIONAL']]
changeo_merge.columns = ['cell', 'J_CALL', 'IN_FRAME', 'FUNCTIONAL']
merged_df = pd.merge(changeo_merge, _act_cells_df, on='cell', how='inner')
productive_J_tx = merged_df[merged_df['J_CALL'].str.split('*', expand = True)[0] == merged_df['J_exon']]
productive_J_tx = productive_J_tx[productive_J_tx.IN_FRAME == True]
productive_J_tx = productive_J_tx[productive_J_tx.FUNCTIONAL == True]
productive_J_tx = productive_J_tx[productive_J_tx.duplicated(subset=['cell', 'J_CALL'], keep=False)]
#log transform counts
productive_J_tx['log2_unique_mapping'] = np.log2(productive_J_tx['unique_mapping'])
cells_in_act = productive_J_tx.drop_duplicates()
#Plot
if plot == True:
f, ax = plt.subplots(figsize=(7, 20))
ax = sns.barplot(data = cells_in_act, y = 'cell', x = 'log2_unique_mapping', hue = 'exon_start')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad = 0)
print(str(cells_in_act.cell.unique().shape[0]), 'cells in act of switching')
return cells_in_act
def plotSwitchAndAbtx(ab_tx, switch_tx, cell_list):
"""makes a point plot of the IGH locus where each observation is a cell and an observation consists
of counts for each of the genes at the IgH locus"""
#Make ab_tx dataframe long form (cell, exon_start, unique_mapping)
_dfab = ab_tx[ab_tx.exon_start.str.contains('exon1')].copy()
_dfab_sum = _dfab.groupby(['cell', 'exon_start']).sum()
_dfab_sum['unique_mapping_log2'] = np.log2(_dfab_sum['unique_mapping'])
_dfab_long = _dfab_sum.unique_mapping_log2.unstack().fillna(0)
new_columns = []
for i in _dfab_long.columns:
gene_name = i.split('_')[0]
new_columns.append(gene_name)
_dfab_long.columns = new_columns
point_plot_df = _dfab_long
point_plot_df = point_plot_df.reset_index()
point_plot_df = | pd.melt(point_plot_df, value_vars = point_plot_df.columns[1:], id_vars = point_plot_df.columns[0]) | pandas.melt |
import PyPDF2
import csv
from pathlib import Path
import io
import pandas
import numpy
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
# def Cpk(usl, lsl, avg, sigma , cf, sigma_cf):
# cpu = (usl - avg - (cf*sigma)) / (sigma_cf*sigma)
# cpl = (avg - lsl - (cf*sigma)) / (sigma_cf*sigma)
# cpk = numpy.min([cpu, cpl])
# return cpl,cpu,cpk
def convert_pdf_to_txt(path):
rsrcmgr = PDFResourceManager()
retstr = io.BytesIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
return text
def filename_extraction(inp_filename):
raw = inp_filename.split('_')
dev = raw[1]
volt = raw[2]
temp = raw[3]
condition = raw[4]+raw[5]+raw[6]+raw[7]
return dev,volt,temp,condition
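# Sketch with a hypothetical filename of the shape the splitting above assumes
# (real report names follow this underscore convention):
# filename_extraction('eye_CCG3PA2_5V_25C_DP_2LANE_TX_1.pdf')
#   -> dev='CCG3PA2', volt='5V', temp='25C', condition='DP2LANETX1.pdf'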
############################### User inputs ###############################################
path_of_files = r'C:\Users\vind\OneDrive - Cypress Semiconductor\documents\python_codes\EYE_DIAG_ANALYZER\pdf_ccg3pa2_tt'
pathlist = Path(path_of_files).glob('**/*.pdf')
output_filename = 'out'
automated_data_collection = 'yes' #'no'
################################# Program Begins #########################################
if automated_data_collection == 'no':
with open(output_filename +'raw'+ '.csv', 'a', newline='') as csvfile:
mywriter1 = csv.DictWriter(csvfile, dialect='excel',
fieldnames=['rise_time_average', 'rise_time_minimum', 'rise_time_maximum',
'fall_time_average', 'fall_time_minimum', 'fall_time_maximum',
'bit_rate_average', 'bit_rate_minimum', 'bit_rate_maximum',
'voltage_swing_average', 'voltage_swing_minimum', 'voltage_swing_maximum', 'filename'])
mywriter1.writeheader()
for files in pathlist:
###################### extracting only measurement page of the pdf file ##########################################
print(files.name)
pdfFileObj = open(files,'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pdfWriter = PyPDF2.PdfFileWriter()
pdfReader.getNumPages()
pageNum = 3
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
pdfOutput = open('temp.pdf', 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
######################### pdf to text conversion ################################
x= convert_pdf_to_txt('temp.pdf')
text_extracted = x.split()
counter_list = list(enumerate(text_extracted, 1))
rise_time_average = (counter_list[91])[1]
fall_time_average = (counter_list[93])[1]
bit_rate_average = (counter_list[97])[1]
rise_time_minimum = (counter_list[145])[1]
fall_time_minimum = (counter_list[147])[1]
bit_rate_minimum = (counter_list[151])[1]
rise_time_maximum = (counter_list[156])[1]
fall_time_maximum = (counter_list[158])[1]
bit_rate_maximum = (counter_list[162])[1]
voltage_swing_average = (counter_list[131])[1]
voltage_swing_minimum = (counter_list[170])[1]
voltage_swing_maximum = (counter_list[174])[1]
data_raw = [float(rise_time_average), float(rise_time_minimum), float(rise_time_maximum), float(fall_time_average),
float(fall_time_minimum), float(fall_time_maximum), float(bit_rate_average), float(bit_rate_minimum),
float(bit_rate_maximum), float(voltage_swing_average), float(voltage_swing_minimum),
float(voltage_swing_maximum), files.name]
print(data_raw)
mywriter2 = csv.writer(csvfile, delimiter=',', dialect = 'excel')
mywriter2.writerow(data_raw)
################## Analysis begins ##########################################
| pandas.set_option('display.expand_frame_repr', False) | pandas.set_option |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = | DataFrame(['00:00:02']) | pandas.DataFrame |
from __future__ import print_function
import zeep
import numpy as np
import pandas as pd
import warnings
_INFO = """PyIress documentation (GitHub):
https://github.com/ceaza/pyiress"""
WSDL_URL_GENERIC='http://127.0.0.1:51234/wsdl.aspx?un={username}&cp={companyname}&svc={service}&svr=&pw={password}'
class PyIressException(Exception):
pass
class Iress(object):
def __init__(self, companyname,username, password, service='IRESS',raise_on_error=True, show_request=False,
proxy=None, **kwargs):
"""Establish a connection to the IRESS Web Services with Version 4 desktop.
companyname / username / password - credentials for the Iress account.
service - only service for desktop version is IRESS.
raise_on_error - If True then error request will raise a "IressException",
otherwise either empty dataframe or partially
retrieved data will be returned
show_request - If True, then every time a request string will be printed
A custom WSDL url (if necessary for some reasons) could be provided
via "url" parameter.
"""
import logging.config
self.services=[service]
self.show_request = show_request
if self.show_request:
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'zeep.transports': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console'],
},
}
})
else:
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
self.raise_on_error = raise_on_error
self.last_status = None # Will contain status of last request
WSDL_URL = WSDL_URL_GENERIC.format(companyname=companyname,username=username,password=password,service=service)
self._url = kwargs.pop('url', WSDL_URL)
# Trying to connect
try:
self.client = zeep.Client(wsdl=WSDL_URL)
        except Exception:
raise PyIressException('Cannot Connect')
# Create session
login_details={'UserName':username,
'CompanyName':companyname,
'Password':password,
'ApplicationID':'app'}
IRESSSessionStartInputHeader = {"Parameters":login_details}
self.session=self.client.service.IRESSSessionStart(Input=IRESSSessionStartInputHeader)
self.IRESSSessionKey=self.session.Result.DataRows.DataRow[0].IRESSSessionKey
self.UserToken=self.session.Result.DataRows.DataRow[0].UserToken
self.last_response = None
self.header={'Header':{'SessionKey':self.IRESSSessionKey}}
# Check available data sources
if 'IRESS' not in self.services:
warnings.warn("'IRESS' source is not available for given subscription!")
@staticmethod
def info():
print(_INFO)
def sources(self):
"""Return available sources of data.
Curretly only IRESS"""
return self.services
def version(self):
"""Return version of Iress Client."""
res = self.client.namespaces
return res
def _time_series(self,ticker,exchange,start_date,end_date,freq='daily'):
'''
SecurityCode string Yes No The security code to filter by.
Exchange string Yes No The exchange to filter by.
DataSource string Yes No The data source to filter by.
Frequency string No No The frequency type, one of 'daily', 'weekly', 'monthly', 'quarterly' or 'yearly'.
TimeSeriesFromDate date No No The date to retrieve time series from.
TimeSeriesToDate date No No The date to retrieve time series to.
'''
parameters={'Parameters': {'SecurityCode': ticker,
'Exchange': exchange,
'Frequency':freq,
'TimeSeriesFromDate':start_date.strftime('%Y/%m/%d'),
'TimeSeriesToDate':end_date.strftime('%Y/%m/%d')
} }
inputs={**self.header, **parameters}
res=self.client.service.TimeSeriesGet2(Input=inputs)
try:
data=zeep.helpers.serialize_object(res.Result.DataRows.DataRow)
df=pd.DataFrame(data)
# print(df.tail())
df['TimeSeriesDate']=pd.to_datetime(df.TimeSeriesDate)
df=df.set_index('TimeSeriesDate')
# print(df.columns)
        except Exception:
df=pd.DataFrame()
return df
def time_series(self,ticker,exchange,start_date,end_date,freq='daily',fields=[]):
'''
SecurityCode string Yes No The security code to filter by.
Exchange string Yes No The exchange to filter by.
DataSource string Yes No The data source to filter by.
Frequency string No No The frequency type, one of 'daily', 'weekly', 'monthly', 'quarterly' or 'yearly'.
TimeSeriesFromDate date No No The date to retrieve time series from.
TimeSeriesToDate date No No The date to retrieve time series to.
Available fields - ['OpenPrice', 'HighPrice', 'LowPrice', 'ClosePrice', 'TotalVolume',
'TotalValue', 'TradeCount', 'AdjustmentFactor', 'MarketVWAP',
'ShortSold', 'ShortSoldPercent', 'ShortSellPosition',
'ShortSellPositionPercent', '_value_1', 'exchange']
'''
part_date=start_date
data=pd.DataFrame()
while part_date < pd.Timestamp(end_date):
try:
new_data=self._time_series(ticker,exchange,part_date,end_date,freq)
data=pd.concat([data,new_data])
part_date = data.index.max() + pd.DateOffset(1,'D')
            except Exception:
break
return data
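    # Illustrative usage sketch (hypothetical ticker and dates; not part of the original source):
    #   df = iress.time_series('BHP', 'ASX', pd.Timestamp('2020-01-01'),
    #                          pd.Timestamp('2020-06-30'), freq='daily')
    #   df['ClosePrice'].plot()  # 'ClosePrice' is one of the fields listed in the docstring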
def dividends(self,ticker,exchange,start_date,end_date,freq=None,index_on='ExDividendDate'):
'''
SecurityCode string Yes No The security code to filter by.
Exchange string Yes No The exchange to filter by.
DataSource string Yes No The data source to filter by.
Frequency string No No The frequency type, one of 'daily', 'weekly', 'monthly', 'quarterly' or 'yearly'.
TimeSeriesFromDate date No No The date to retrieve time series from.
TimeSeriesToDate date No No The date to retrieve time series to.
available fields = ['DividendAmount', 'AdjustedDividendAmount', 'FrankedPercent',
'PayableDate', 'BooksClosingDate', 'DividendType', 'ShareRate',
'DividendYield', 'DRPPrice', 'DividendDescription', 'DeclarationDate',
'STCCreditsPerShare', '_value_1', 'exchange']
'''
parameters={'Parameters': {'SecurityCode': ticker,
'Exchange': exchange,
'PayDateFrom':start_date.strftime('%Y/%m/%d'),
'PayDateTo':end_date.strftime('%Y/%m/%d')
} }
inputs={**self.header, **parameters}
try:
res=self.client.service.SecurityDividendGetBySecurity(Input=inputs)
data=zeep.helpers.serialize_object(res.Result.DataRows.DataRow)
df=pd.DataFrame(data)
df[index_on]=pd.to_datetime(df[index_on])
df=df.set_index(index_on)
        except Exception:
df= | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
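    # Illustrative example (not part of the pandas source): for idx = Index([1, 2, 3]),
    # idx.is_(idx) is expected to be True, while idx.is_(idx.copy()) is expected to be
    # False, because copy() allocates a fresh identity object via _reset_identity().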
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
        ----------------- | -------------- | ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if | is_bool(other) | pandas.core.dtypes.common.is_bool |
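# Illustrative behaviour of Index.where above (expected result; not part of the pandas source):
#   pd.Index([1, 2, 3]).where([True, False, True]) -> Float64Index([1.0, nan, 3.0], dtype='float64')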
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
import pandas as pd
def func_WVF(close, low, Lookback=22):
    # Williams VIX Fix: percentage distance of the current low below the highest close of the lookback window
    WVF = np.zeros((len(close),1))
    for i in range(Lookback, len(close)):
        highest_close_temp = close[i-Lookback:i].max()
        WVF[i] = (1 - low.values[i] / highest_close_temp) * 100
WVF_df = | pd.DataFrame(WVF, index=close.index) | pandas.DataFrame |
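# Illustrative usage of the Williams VIX Fix helper above (hypothetical OHLC frame; not part of the original source):
#   wvf_df = func_WVF(ohlc['close'], ohlc['low'], Lookback=22)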
import pandas as pd
import numpy as np
from matplotlib import colors, cm, text, pyplot as plt
import matplotlib.patches as patches
import os
import time
from cmcrameri import cm
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
import sqlite3
import glob
import tempfile
import zipfile
import json
import shutil
# generate a tile for each frame, annotating intersecting precursor cuboids
# loads the metadata from the specified zip file
def load_precursor_cuboid_metadata(filename):
temp_dir = tempfile.TemporaryDirectory().name
with zipfile.ZipFile(filename, "r") as zf:
zf.extractall(path=temp_dir)
names = zf.namelist()
with open('{}/{}'.format(temp_dir, names[0])) as json_file:
metadata = json.load(json_file)
# clean up the temp directory
shutil.rmtree(temp_dir)
return metadata
MZ_MIN = 748 # default is 748
MZ_MAX = 766 # default is 766
SCAN_MIN = 350 # default is 1
SCAN_MAX = 850 # default is 920
RT_MIN = 2000
RT_MAX = 2200
PIXELS_X = 800
PIXELS_Y = 800
PIXELS_PER_MZ = PIXELS_X / (MZ_MAX - MZ_MIN)
PIXELS_PER_SCAN = PIXELS_Y / (SCAN_MAX - SCAN_MIN)
minimum_pixel_intensity = 1
maximum_pixel_intensity = 250
EXPERIMENT_NAME = 'P3856'
TILES_BASE_DIR = '/home/ubuntu/precursor-cuboid-tiles'
RUN_NAME = 'P3856_YHE211_1_Slot1-1_1_5104'
CONVERTED_DATABASE_NAME = '/data2/experiments/P3856/converted-databases/exp-P3856-run-{}-converted.sqlite'.format(RUN_NAME)
# font paths for overlay labels
UBUNTU_FONT_PATH = '/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf'
MACOS_FONT_PATH = '/Library/Fonts/Arial.ttf'
def pixel_x_from_mz(mz):
pixel_x = int((mz - MZ_MIN) * PIXELS_PER_MZ)
return pixel_x
def pixel_y_from_scan(scan):
pixel_y = int((scan - SCAN_MIN) * PIXELS_PER_SCAN)
return pixel_y
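# Illustrative sanity check (not part of the original source): with MZ_MIN=748 and
# PIXELS_PER_MZ = 800 / 18 ~= 44.44, pixel_x_from_mz(750.0) == int(2 * 44.44) == 88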
# load the raw data for the region of interest
print('loading the raw data from {}'.format(CONVERTED_DATABASE_NAME))
db_conn = sqlite3.connect(CONVERTED_DATABASE_NAME)
raw_df = pd.read_sql_query("select * from frames where frame_type == 0 and mz >= {} and mz <= {} and scan >= {} and scan <= {} and retention_time_secs >= {} and retention_time_secs <= {};".format(MZ_MIN, MZ_MAX, SCAN_MIN, SCAN_MAX, RT_MIN, RT_MAX), db_conn)
db_conn.close()
raw_df['pixel_x'] = raw_df.apply(lambda row: pixel_x_from_mz(row.mz), axis=1)
raw_df['pixel_y'] = raw_df.apply(lambda row: pixel_y_from_scan(row.scan), axis=1)
# sum the intensity of raw points that have been assigned to each pixel
pixel_intensity_df = raw_df.groupby(by=['frame_id', 'pixel_x', 'pixel_y'], as_index=False).intensity.sum()
print('intensity range {}..{}'.format(pixel_intensity_df.intensity.min(), pixel_intensity_df.intensity.max()))
# create the colour map to convert intensity to colour
colour_map = plt.get_cmap('ocean')
# colour_map = cm.batlow
norm = colors.LogNorm(vmin=minimum_pixel_intensity, vmax=maximum_pixel_intensity, clip=True) # aiming to get good colour variation in the lower range, and clipping everything else
# calculate the colour to represent the intensity
colours_l = []
for i in pixel_intensity_df.intensity.unique():
colours_l.append((i, colour_map(norm(i), bytes=True)[:3]))
colours_df = pd.DataFrame(colours_l, columns=['intensity','colour'])
pixel_intensity_df = | pd.merge(pixel_intensity_df, colours_df, how='left', left_on=['intensity'], right_on=['intensity']) | pandas.merge |
# Given an ensemble of models, evolve a random sequence to fulfill an objective.
# cf. https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html
import keras
from keras import backend as K
from functools import partial
import sys
import os
import pandas
import numpy as np
import random
from numpy.random import choice, rand
import ConfigParser
import seq_evolution
import seq_selection
def debug_dists(seq1, seq2):
return np.sqrt(np.sum((seq1 - seq2)**2))
DNA = seq_evolution.DNA
class seq_evolution_class_gradient(seq_evolution.seq_evolution_class):
def __init__(self, cfg):
# prepare the models, populate the sequences, et cetera
self.init_noise = float(cfg.get('Params','INIT_NOISE'))
super(seq_evolution_class_gradient, self).__init__(cfg)
self.loss_tensor_fx = self._get_loss_tensor_fx(cfg) # given a model, get an output tensor
scores_fx_name = cfg.get('Functions','seq_scores_keras')
if scores_fx_name == 'None':
self.seq_scores_fx = None
else:
get_seq_scores_fx = eval(scores_fx_name)
[seq_start, seq_end] = [int(q) for q in cfg.get('Params', 'SEQ_INDICES').strip().split(',')]
self.seq_scores_fx = get_seq_scores_fx(self.models[0], seq_start, seq_end)
iterates = [self._get_iterate_fx_from_model(q) for q in self.models]
def _get_mean_grad(iterates, seq_scores_fx, input):
iterate_outputs = [q(input) for q in iterates]
losses = [p for (p,q) in iterate_outputs]
grads = [q for (p,q) in iterate_outputs]
#final_grad = K.mean(K.stack(grads, axis = 0), axis = 0)
final_grad = np.mean(np.stack(grads, axis = 0), axis = 0)
if seq_scores_fx != None:
seq_losses, seq_grad = seq_scores_fx(input)
losses += seq_losses
final_grad += seq_grad
return(losses, final_grad)
# given an input, get a list of losses, and the mean gradient of the input, as Numpy arrays
self.losses_and_grads = partial(_get_mean_grad, iterates, self.seq_scores_fx)
# For gradient updates - keep from "updating" immutable bases
self.mutable_mask = np.zeros(self.base_probs.shape[1])
self.mutable_mask[self.mutable] = 1.
self.mutable_mask = self.mutable_mask[np.newaxis,:self.mutable_mask.shape[0]-self.shift+1, np.newaxis]
def _get_loss_tensor_fx(self, cfg):
def loss_wrapper(merge_outputs_keras, model_in):
model_output = model_in.output
loss = merge_outputs_keras(model_output)
return(loss)
merge_outputs_keras = eval(cfg.get('Functions','merge_outputs_keras')) # needs to be written in terms of the backend
loss_tensor_fx = partial(loss_wrapper, merge_outputs_keras)
return(loss_tensor_fx) # takes a model, returns a loss tensor
# given a model, get a function taking an input tensor and returning loss and gradient tensors.
def _get_iterate_fx_from_model(self, model_in):
input_seq = model_in.input
loss = self.loss_tensor_fx(model_in)
grads = K.gradients(loss, input_seq)[0] # gradient at the input
# following example, normalize gradient
# cf. https://github.com/keras-team/keras/blob/master/examples/conv_filter_visualization.py
normalize = lambda x: x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
grads = normalize(grads)
iterate = K.function([input_seq], [loss, grads])
return(iterate)
# raise all values to some power, and renormalize -
# this encourages one value to dominate, approximating one-hot encoding.
def _norm_bias(self, seqs, norm_power):
print(np.min(seqs))
print(np.max(seqs))
seqs = np.power(seqs, norm_power[:,np.newaxis, np.newaxis])
seq_normalize = np.apply_along_axis(np.sum, 2, seqs)[...,np.newaxis]
seqs = seqs/seq_normalize
return(seqs)
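  # Illustrative effect of the sharpening step above (not part of the original source):
  #   with norm_power = 2, a position distributed as [0.6, 0.4] becomes [0.36, 0.16] and,
  #   after renormalising, approximately [0.69, 0.31] - pushed towards a one-hot encoding.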
# given an ensemble of models and a list of sequences, update with the mean of gradients
def _update_seq_ensemble(self, step, norm_power):
seqs = self.seqs_iter
losses, gradient = self.losses_and_grads([seqs])
losses = np.apply_along_axis(np.mean,0,np.stack(losses))
print(losses)
gradient = np.multiply(gradient, self.mutable_mask)
seqs += gradient*step[:,np.newaxis, np.newaxis]
seqs = np.clip(seqs,0.,1.) # enforce between 0 and 1
seq_normalize = np.apply_along_axis(np.sum, 2, seqs)[...,np.newaxis]
seqs = seqs/seq_normalize
seqs = self._norm_bias(seqs, norm_power)
self.seqs_iter = np.array(seqs)
return(losses)
def rand_range(self, shape_0, shape_1, min, max):
r = rand(shape_0, shape_1)*(max - min)
r = r + min
return(r)
def _generate_n_sequences(self, n):
seqs = np.zeros((n,) + self.base_probs.shape)
for i in range(self.base_probs.shape[1]):
if(np.max(self.base_probs[:,i]) == 1.):
seqs[:,:,i] = self.base_probs[:,i]
else:
probs = self.base_probs[:,i][np.newaxis,...]
probs = np.repeat(probs, n, axis = 0)
noise = self.rand_range(probs.shape[0], probs.shape[1], 1. - self.init_noise, 1. + self.init_noise)
seqs[:,:,i] = probs*noise
#noise = rand(probs.shape[0], probs.shape[1])*self.init_noise
#seqs[:,:,i] = probs + noise
normalize_arr = np.apply_along_axis(np.sum,1, seqs)[:,np.newaxis,:]
seqs = seqs/normalize_arr
return(seqs)
def _populate_sequences(self):
seqs = self._generate_n_sequences(self.num_seqs)
self.seqs = seqs
# In seq_evolve_to_threshold, we want to generate replacement sequences one at a time.
def _populate_one_sequence(self):
return( self._generate_n_sequences(1) )
# override method in parent class; update via gradient
def iterate(self): # , params, iter_idx
seqs_iter = np.swapaxes(self.seqs, 1, 2)
removed_pad = seqs_iter[:,seqs_iter.shape[1]-self.shift+1:,:]
self.seqs_iter = seqs_iter[:,:seqs_iter.shape[1]-self.shift+1,:]
print('Iteration (0): ' + str(self.curr_iters[0]))
losses = self._update_seq_ensemble(self.map_to_key('gradient_step'), self.map_to_key('normalize_power'))
seqs_iter = np.concatenate([self.seqs_iter, removed_pad], axis = 1)
self.seqs = np.swapaxes(seqs_iter, 1, 2)
self.score_tracking.append(losses)
def basic_iterative(self, num_iters):
for i in range(num_iters):
self.iterate()
self.curr_iters += 1
def generate_report(self):
seqs_out = self.de_onehot(self.seqs)
ans = {'Seqs': seqs_out}
orig_preds = self._test_sequences(self.seqs)
self.seqs = self.round_seqs(self.seqs)
final_preds = self._test_sequences(self.seqs)
print(np.mean((final_preds - orig_preds)[:,1,:],axis=1))
print(final_preds.shape)
for i,p in enumerate(self.output_names):
for j in range(final_preds.shape[-1]):
this_pred = final_preds[:,i,j]
this_name = p + '_' + str(j)
ans[this_name] = this_pred
return( | pandas.DataFrame(ans) | pandas.DataFrame |
import json
import csv
import glob
import pandas as pd
from pandas.core.series import Series
from pandas.core.frame import DataFrame
import matplotlib.pyplot as plt
from matplotlib import cm
# Japanese font settings
from matplotlib import rc
jp_font = "Yu Gothic"
rc('font', family=jp_font)
def delete_duplicaion_index(input_list):
"""delete consecutive numbers in the list.
Args:
input_list (list): A continuous number exists.
Returns:
list: There is no continuous number.
"""
temp = 0
index = []
for i in input_list:
if i - 1 == temp or i + 1 == temp:
pass
else:
index.append(i)
temp = i
return index
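# Illustrative example traced from the logic above (not part of the original source):
#   delete_duplicaion_index([3, 4, 10, 11, 20]) -> [3, 10, 20]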
def plot_graph(pg_df, pg_title_text, pg_plane=True):
"""plot pandas DataFrame on the graph(s).
Args:
pg_df (pandas.DataFrame or pandas.Series): Data to be graphed.
pg_title_text (str): title
pg_plane (bool, optional): Choose between a single graph or multiple graphs.
Defaults to True.
"""
fig = plt.figure(figsize=(10, 6))
if pg_plane:
if type(pg_df) == DataFrame:
ax_1 = fig.add_subplot()
ax_2 = ax_1.twinx()
            # colour settings
color_1 = cm.Set1.colors[1]
color_2 = cm.Set1.colors[4]
            # plot
            # colours set via cm, front/back order via zorder, line width via linewidth
            # if an error occurs, only one graph is displayed
pg_df.iloc[:, 0].plot(ax=ax_1, color=color_1, zorder=-2, linewidth=2)
pg_df.iloc[:, 1].plot(ax=ax_2, color=color_2, zorder=-1, linewidth=0.5)
            # combine the legends of both graphs
handler_1, label_1 = ax_1.get_legend_handles_labels()
handler_2, label_2 = ax_2.get_legend_handles_labels()
_ = ax_2.legend(handler_1 + handler_2, label_1 + label_2)
            # show title and grid
_ = ax_1.set_title(pg_title_text)
_ = ax_1.grid(True)
elif type(pg_df) == Series:
ax = fig.add_subplot()
pg_df.plot(ax=ax)
_ = ax.legend()
# タイトルとグリッド表示
_ = ax.set_title(pg_title_text)
_ = ax.grid(True)
else:
raise Exception("pandasの型式ではありません。")
else:
ax = fig.subplots(3, 3)
plt.suptitle(pg_title_text)
ax_f = ax.flatten()
for i, m in enumerate(pg_df):
ax_f[i].plot(pg_df[m])
_ = ax_f[i].set_title(m)
_ = ax_f[i].grid(True)
        # needed to prevent the subplots from overlapping
plt.tight_layout()
plt.show()
class ExtractorData():
"""concatenate csv files, extract specific data and save the results to excel.
"""
def __init__(self, json_file_path):
"""read json, set the variables, concatenate csv files and make dataframe.
Args:
json_file_path (str): path of the json file.
"""
        # extract the parameters
with open(json_file_path, "r", encoding="utf-8") as setting:
self._setting_dict = json.load(setting)
        # read settings from the json into variables
        # file names
single_file_names = glob.glob(self._setting_dict["file"]["path"] + self._setting_dict["file"]["single"])
double_file_names = glob.glob(self._setting_dict["file"]["path"] + self._setting_dict["file"]["double"])
all_file_names = single_file_names + double_file_names
        # labels
self._label_dict = self._setting_dict["label"]
        # thresholds
self._period_step = self._setting_dict["period"]["step"]
self._period_start = self._setting_dict["period"]["start"]
self._period_end = self._setting_dict["period"]["end"]
        # range for the first plot
self._1st_plot_range_start = self._setting_dict["1st_plot"]["start"]
self._1st_plot_range_end = self._setting_dict["1st_plot"]["end"]
        # extraction timing
self._extract_dict = self._setting_dict["extract"]
        # cut timing for the reference data
self._referance_1st = self._setting_dict["reference"]["1st"]
        # load the result data
temp_list = []
for i in all_file_names:
print(i)
temp = pd.read_csv(i, skiprows=70, encoding="cp932")
temp_list.append(temp)
        # concatenate the dataframes
self._df_csv = | pd.concat(temp_list, ignore_index=False) | pandas.concat |
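# Illustrative sketch of the settings JSON consumed by ExtractorData.__init__ above
# (keys inferred from the attribute reads; all values are hypothetical):
#   {"file": {"path": "./data/", "single": "single*.csv", "double": "double*.csv"},
#    "label": {...}, "period": {"step": ..., "start": ..., "end": ...},
#    "1st_plot": {"start": ..., "end": ...}, "extract": {...}, "reference": {"1st": ...}}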
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import pandas as pd
import re
import seaborn as sns; sns.set()
import warnings; warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
from matplotlib.colors import ListedColormap
from matplotlib.ticker import MaxNLocator
from pylab import *
from sklearn.metrics.pairwise import euclidean_distances
import keywords
rcParams['axes.titlesize'] = 9
rcParams['axes.titlepad'] = 2
# change if needed
limit_plots = True
limit_num = 50 # plot for this many users when limit_plots is True
verbose = True
similarity_threshold = 0.5
similarity_thresholds_list = np.linspace(0, 1, 55)
# specify unit and problem number (command-line arguments)
unit = int(sys.argv[1])
pb = int(sys.argv[2])
u_pb = '{}-{}'.format(unit, pb) # in format 1-1, 5-3, etc
# set substrings to count/track through user submission history
substrs = keywords.python_keywords
keyword_groups = keywords.python_keyword_colors
# load user histories
kw_occ_traj_path = '../../data/keyword_occurrence/trajectory/unit{}_pb{}_kw_occ_traj.csv'
kw_occ_traj_df = pd.read_csv(kw_occ_traj_path.format(u_pb[0], u_pb[2]), index_col=None)
# load sample solutions
sample_sols = pd.read_csv('sample_sols.csv', index_col=None)
# where to save plots
save_dir = 'submission_history_plots' # saves in this_dir/problem_subfolder
os.makedirs('{}/{}-{}'.format(save_dir, unit, pb), exist_ok=True)
def main():
all_u_kw_occ_comments = pd.Series(kw_occ_traj_df.kw_occ_matrix.values,
index=kw_occ_traj_df.user_id).to_dict()
all_u_kw_occ_stripped = pd.Series(kw_occ_traj_df.stripped_kw_occ_matrix.values,
index=kw_occ_traj_df.user_id).to_dict()
all_u_exp = pd.Series(kw_occ_traj_df.user_exp.values,
index=kw_occ_traj_df.user_id).to_dict()
all_final_correct_bool = pd.Series(kw_occ_traj_df.final_correct_bool.values,
index=kw_occ_traj_df.user_id).to_dict()
plot_count = 0
    make_plots = True  # turned off below once limit_num users have been plotted
similar_correct = 0
different_correct = 0
similar_incorrect = 0
different_incorrect = 0
exp_dict_counts = {
'absolutely_none' : [0, 0, 0, 0],
'other_language' : [0, 0, 0, 0],
'know_python' : [0, 0, 0, 0],
'veteran': [0, 0, 0, 0],
'no_response' : [0, 0, 0, 0]
}
for u_id in kw_occ_traj_df['user_id'].to_list():
if limit_plots and plot_count == limit_num:
make_plots = False
# heatmap using comments stripped counts
stripped_counts = eval(all_u_kw_occ_stripped[u_id])
cols_as_subs = np.array(stripped_counts).T
sub_name_cols = ['Sub {}'.format(i+1) for i in range(len(cols_as_subs[0]))]
df_to_corr = pd.DataFrame(cols_as_subs, columns=sub_name_cols)
sample = sample_sols.loc[sample_sols['problem']==u_pb, 'solution'].iloc[0] # extract
df_to_corr['Sample'] = subs_substr_count([sample], substrs)[0]
dist = df_to_corr.corr() # used for plotting
dist_matrix = dist.to_numpy() # used for similarity count
n = len(dist_matrix)
sample_similarity = dist_matrix[n-1][n-2]
exp_level = all_u_exp[u_id]
# compare correctness, similarity to sample using sim threshold
if all_final_correct_bool[u_id]:
if sample_similarity >= similarity_threshold:
similar_correct += 1
exp_dict_counts[exp_level][0] += 1
else:
different_correct += 1
exp_dict_counts[exp_level][1] += 1
else:
            if sample_similarity >= similarity_threshold:
similar_incorrect += 1
exp_dict_counts[exp_level][2] += 1
else:
different_incorrect += 1
exp_dict_counts[exp_level][3] += 1
if make_plots:
# over time, including comments
f1 = plt.figure()
counts = eval(all_u_kw_occ_comments[u_id])
subs_df = pd.DataFrame(columns=(['Submission']+substrs), data=add_idx(counts))
ax = subs_df.set_index('Submission').plot(kind='bar', stacked=True,
colormap=ListedColormap(sns.xkcd_palette(keyword_groups)), figsize=(8,6))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f1_title = 'Keyword occurrences in submission history \n of user {} on Problem {}'
plt.suptitle(f1_title.format(u_id, u_pb))
plt.savefig('{}/{}/{}_comments.png'.format(save_dir, u_pb, u_id), bbox_inches='tight')
# heatmaps
f2 = plt.figure()
mask = np.zeros_like(dist)
mask[np.triu_indices_from(mask)] = True # show only below diagonal
sns.heatmap(dist, vmin=0.0, vmax=1.0, mask=mask, square=True, cmap='coolwarm', linewidths=.5)
plt.yticks(rotation=0)
plt.xticks(rotation=90)
f2_title = 'Code submission history correlations \n of user {} on Problem {}'
plt.suptitle(f2_title.format(u_id, u_pb))
plt.savefig('{}/{}/{}_heatmap.png'.format(save_dir, u_pb, u_id), bbox_inches='tight')
# over time, comments stripped
f3 = plt.figure()
stripped_subs_df = pd.DataFrame(columns=(['Submission']+substrs), data=add_idx(stripped_counts))
ax = stripped_subs_df.set_index('Submission').plot(kind='bar', stacked=True,
colormap=ListedColormap(sns.xkcd_palette(keyword_groups)), figsize=(8,6))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f3_title = 'Term occurrences in comment-stripped submission history \n of user {} on Problem {}'
plt.suptitle(f3_title.format(u_id, u_pb))
plt.xlabel('Submission on Problem {}'.format(u_pb), fontsize=18)
plt.savefig('{}/{}/{}_stripped.png'.format(save_dir, u_pb, u_id), bbox_inches='tight')
plt.close('all')
plot_count += 1
if verbose:
total_correct = float(similar_correct + different_correct)
total_incorrect = float(similar_incorrect + different_incorrect)
print('Total correct students: {}'.format(total_correct))
print('Total incorrect students: {} \n'.format(total_incorrect))
print('Correct students with similar solutions: {}'.format(similar_correct))
print('Percentage of correct: {} \n'.format(similar_correct/total_correct))
print('Correct students with very different solutions: {}'.format(different_correct))
print('Percentage of correct: {} \n'.format(different_correct/total_correct))
print('Incorrect students with similar solutions: {}'.format(similar_incorrect))
print('Percentage of incorrect: {} \n'.format(similar_incorrect/total_incorrect))
print('Incorrect students with very different solutions: {}'.format(different_incorrect))
print('Percentage of incorrect: {} \n'.format(different_incorrect/total_incorrect))
do_sim_cor_results(similar_correct,
different_correct,
similar_incorrect,
different_incorrect,
unit, pb, similarity_threshold)
do_sim_cor_by_exp(exp_dict_counts, unit, pb, similarity_threshold)
def subs_substr_count(submission_list, substr_list):
'''
Counts desired substrings in each submission string.
Both strings and substrings given in lists.
:param submission_list: python list of code submissions, as strings
:param substr_list: specifies strings for which to count occurrences in code
'''
sub_substrs_matrix = []
for sub in submission_list:
sub_substrs = [sub.count(substr) for substr in substr_list]
sub_substrs_matrix.append(sub_substrs)
return sub_substrs_matrix
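# Usage sketch (illustrative values only):
#     subs_substr_count(["def f(x):\n    return x", "print(1)"], ["def", "return", "print"])
#     # -> [[1, 1, 0], [0, 0, 1]]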
def add_idx(cts):
'''
Prepends the 1-indexed index to each row in the 2D array cts
:param cts: the 2D array representing the substring counts
(mutator function - beware when using)
'''
for i in range(len(cts)):
cts[i] = [i+1] + cts[i]
return cts
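# Usage sketch (illustrative; note that add_idx mutates its argument in place):
#     counts = [[1, 0], [2, 3]]
#     add_idx(counts)   # -> [[1, 1, 0], [2, 2, 3]]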
def remove_comments_str(astr):
'''
Removes comment lines and extra newlines from astr, returns astr.
:param astr: input string to process
'''
astr = re.sub(r'(#+)(.*)\r\n', '\r\n', astr) # single line comments - keep return
astr = re.sub(r'\"\"\"[\s\S]*?\"\"\"', '', astr) # multiline comments
astr = re.sub(r'(\r\n)+[\s]*\r\n', '\r\n', astr) # extra newlines
return astr
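# Usage sketch (illustrative; assumes Windows-style '\r\n' line endings, as in the regexes above):
#     remove_comments_str('x = 1  # set x\r\ny = 2\r\n')   # -> 'x = 1  \r\ny = 2\r\n'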
def do_sim_cor_results(sim_cor, diff_corr, sim_in, diff_in, unit, problem, sim_th):
'''
Prints out numbers of students by similarity and correctness.
Also saves results to a quadrant heatmap plot.
    :param sim_cor: number of correct students whose final solution is similar to the sample
    :param diff_corr: number of correct students whose final solution is very different
    :param sim_in: number of incorrect students whose final solution is similar to the sample
    :param diff_in: number of incorrect students whose final solution is very different
    :param unit: course unit number
    :param problem: problem number within the unit
    :param sim_th: similarity threshold used to split similar/different
'''
sim_cor_counts = {'correctness': ['correct', 'correct', 'incorrect', 'incorrect'],
'similarity':['similar', 'different', 'similar', 'different'],
'students': [sim_cor, diff_corr, sim_in, diff_in]}
    sim_cor_counts_df = pd.DataFrame(sim_cor_counts).pivot(index='correctness', columns='similarity', values='students')
cmap = sns.cubehelix_palette(light=0.95, as_cmap=True)
ax = sns.heatmap(sim_cor_counts_df, cmap=cmap, annot=True, fmt='d')
quad_plot_title = 'Student final submission counts \n by correctness-similarity for Problem {}-{} \n thresholded at {} similarity to sample'
plt.title(quad_plot_title.format(unit, problem, sim_th))
os.makedirs('quad_plots', exist_ok=True)
    plt.savefig('quad_plots/sim_{}_cor_{}_{}'.format(int(sim_th*100), unit, problem))
def do_sim_cor_by_exp(experience_counts, unit, problem, sim_th):
    '''
    Plots a 2x2 grid of correctness-similarity count heatmaps, one panel per self-reported
    experience level, and saves the figure under quad_exp_plots/.
    :param experience_counts: dict mapping experience level to
        [similar_correct, different_correct, similar_incorrect, different_incorrect] counts
    :param unit: course unit number
    :param problem: problem number within the unit
    :param sim_th: similarity threshold used to split similar/different
    '''
fig,axn = plt.subplots(2, 2, sharex=True, sharey=True)
cbar_ax = fig.add_axes([.91, .3, .03, .4])
exp_is = {0:'absolutely_none', 1:'other_language', 2:'know_python', 3:'veteran'}
for i, ax in enumerate(axn.flat):
sim_cor_counts = {'correctness': ['correct', 'correct', 'incorrect', 'incorrect'],
'similarity':['similar', 'different', 'similar', 'different'],
'students': experience_counts[exp_is[i]]}
        exp_sim_cor_counts_df = pd.DataFrame(sim_cor_counts).pivot(index='correctness', columns='similarity', values='students')
exp_np = exp_sim_cor_counts_df.to_numpy()
cmap = sns.cubehelix_palette(light=0.95, as_cmap=True)
sns.heatmap(exp_sim_cor_counts_df, cmap=cmap, annot=exp_np/exp_np.sum(), fmt='.1%',
ax=ax,
cbar=i == 0,
                    vmin=min(min(v) for v in experience_counts.values()),
                    vmax=max(max(v) for v in experience_counts.values()),
cbar_ax=None if i else cbar_ax,
cbar_kws=dict(ticks=None))
ax.title.set_text(exp_is[i])
exp_plot_title = 'Correctness-similarity by experience for Problem {}-{}, thresholded at {} similarity to sample'
plt.suptitle(exp_plot_title.format(unit, problem, sim_th), fontsize=10, y=0.99)
fig.tight_layout(rect=[0, 0, .9, 1])
plt.title('students', fontsize=9)
os.makedirs('quad_exp_plots', exist_ok=True)
    plt.savefig('quad_exp_plots/exp_sim_{}_cor_{}_{}'.format(int(sim_th*100), unit, problem))
def similarity_sweep(kw_occ_traj_df, sample_sols_df, sim_th_list, unit, problem):
    '''
    Sweeps the similarity thresholds in sim_th_list for the given problem.
    :param kw_occ_traj_df: per-user keyword-occurrence trajectory dataframe
    :param sample_sols_df: sample solutions per problem
    :param sim_th_list: iterable of similarity thresholds to sweep
    :param unit: course unit number
    :param problem: problem number within the unit
    '''
all_u_kw_occ_stripped = | pd.Series(kw_occ_traj_df.stripped_kw_occ_matrix.values,
index=kw_occ_traj_df.user_id) | pandas.Series |
from sklearn.metrics import roc_auc_score, accuracy_score, r2_score, mean_squared_error, mean_absolute_error
from amlpp.conveyor import Conveyor
from datetime import datetime
from typing import List
import pandas as pd
import pickle
import os
##############################################################################
class Experimenter():
""" The class for working with the structure of experiments in the project
Parameters
----------
experiment: str
Experiment name
"""
def __init__(self, experiment:str):
self.experiment = experiment
self.path_experiment = "experiments/" + experiment
if not os.path.exists(self.path_experiment):
os.makedirs(self.path_experiment)
self.model = None
else:
self.model = self._load_model()
print("load model successful!" if self.model else "model not found!")
def create_experiment(self, model:Conveyor, description_model:str, description_trainset:str):
""" Creation of an experiment
Parameters
----------
model: Conveyor
Trained model
description_model: str
Description of the experiment, model, projects and other significant details
description_trainset: str
Name or path to training set
"""
with open(self.path_experiment + "/model", 'wb') as file:
pickle.dump(model, file)
self.model = model
description = description_model
description += f"\nTrainset: {description_trainset}"
description += f"\nModel:\n{repr(self.model)}"
self.add_description(self.path_experiment, description, 'w+')
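    # Usage sketch (assumed workflow; the names below are illustrative only):
    #     exp = Experimenter("my_experiment")
    #     exp.create_experiment(trained_conveyor, "baseline model", "trainset_v1.csv")
    #     exp.make_experiment(X_test, Y_test, expr_description="holdout run", expr_name="v1")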
def make_experiment(self,
X:pd.DataFrame, Y:pd.DataFrame,
expr_description:str = "", expr_name:str = "",
X_features:List[str] = [],
feature_importances:bool = True):
"""Carrying out an experiment on a test dataset
Parameters
----------
        X : pd.DataFrame
            Test dataset, features (regressors)
        Y : pd.DataFrame
            Test dataset, targets
        expr_description : str = ""
            Description of a specific test
        expr_name : str = ""
            Test dataset name, or description
        X_features : List[str] = []
            Features from the test dataset that will be included in the result set
        feature_importances : bool = True
            Display charts or not
"""
if self.model:
date = datetime.now().strftime("%d-%m-%y %H-%M-%S")
path_current_experiment = f"{self.path_experiment}/{date} - {expr_name}"
os.makedirs(path_current_experiment)
x_, y_ = self.model.transform(X, Y)
predict = self.model.estimator.predict(x_)
score = ""
for metr in (r2_score, mean_squared_error, mean_absolute_error, roc_auc_score, accuracy_score):
try:
score += f"function - {metr.__name__} = {metr(y_, predict)}\n"
except Exception as e:
score += f"function - {metr.__name__} = ERROR: {e}\n"
expr_name = f"({self.experiment}) {expr_name}"
expr_description = f"{expr_description}\nScore:\n{score}"
self.add_description(path_current_experiment, expr_description, 'w+')
print(expr_description)
result_data = X[X_features] if X_features else | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 01 10:00:58 2021
@author: <NAME>
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
from math import e
import numpy as np
import pandas as pd
import os
import time
import glob
import itertools
from joblib import Parallel, delayed
from generate_files import GenerateFiles
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import seaborn as sns
import matplotlib.style as style
style.use('seaborn-poster') #sets the size of the charts
style.use('ggplot')
from scipy import ndimage
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.stats import mad_std
import astrotools.healpytools as hpt
import astropy_healpix as ahp
from astropy.coordinates import ICRS
from tqdm import tqdm
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import healpy as hp
from hpproj import CutSky, to_coord
import logging
cs_logger = logging.getLogger('cutsky')
cs_logger.setLevel(logging.WARNING)
cs_logger.propagate = False
hpproj_logger = logging.getLogger('hpproj')
hpproj_logger.setLevel(logging.WARNING)
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
#------------------------------------------------------------------#
# # # # # Functions # # # # #
#------------------------------------------------------------------#
class MakeData(object):
"""Class to create and preprocess input/output files from full sky-maps.
"""
def __init__(self, dataset, npix, loops, planck_path, milca_path, disk_radius=None, output_path=None):
"""
Args:
dataset (str): file name for the cluster catalog that will used.
Options are 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'.
bands (list): list of full sky-maps that will be used for the input file.
loops (int): number of times the dataset containing patches with at least one cluster within will be added
again to training set with random variations (translations/rotations).
Options are 100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', and 'y-map'.
More full sky-maps will be added later on (e.g. CO2, X-ray, density maps).
planck_path (str): path to directory containing planck HFI 6 frequency maps.
Files should be named as following
'HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_545-field-Int_2048_R3.00_full.fits', 'HFI_SkyMap_857-field-Int_2048_R3.00_full.fits'.
milca_path (str): path to directory containing MILCA full sky map. File should be named 'milca_ymaps.fits'.
disk_radius (float, optional): Disk radius that will be used to create segmentation masks for output files.
Defaults to None.
output_path (str, optional): Path to output directory. Output directory needs be created beforehand using
'python xcluster.py -m True' selecting same output directory in 'params.py'.
If None, xcluster path will be used. Defaults to None.
"""
self.path = os.getcwd() + '/'
self.dataset = dataset # 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'
self.bands = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz','y-map','CO','p-noise']
self.loops = loops
self.n_labels = 2
maps = []
self.freq = 1022
self.planck_freq = 126
if '100GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 100', 'docontour': True}))
# self.freq += 2
# self.planck_freq += 2
if '143GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 143', 'docontour': True}))
# self.freq += 4
# self.planck_freq += 4
if '217GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 217', 'docontour': True}))
# self.freq += 8
# self.planck_freq += 8
if '353GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 353', 'docontour': True}))
# self.freq += 16
# self.planck_freq += 16
if '545GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_545-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 545', 'docontour': True}))
# self.freq += 32
# self.planck_freq += 32
if '857GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_857-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 857', 'docontour': True}))
# self.freq += 64
# self.planck_freq += 64
if 'y-map' in self.bands:
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True}))
# self.freq += 128
if 'CO' in self.bands:
maps.append((planck_path + "COM_CompMap_CO21-commander_2048_R2.00.fits", {'legend': 'CO', 'docontour': True}))
# self.freq += 256
if 'p-noise' in self.bands:
maps.append((planck_path + 'COM_CompMap_Compton-SZMap-milca-stddev_2048_R2.00.fits', {'legend': 'noise', 'docontour': True}))
# self.freq += 512
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True})) #used for plots only
self.maps = maps
self.temp_path = self.path + 'to_clean/'
self.disk_radius = disk_radius
self.npix = npix #in pixels
self.pixsize = 1.7 #in arcmin
self.ndeg = (self.npix * self.pixsize)/60 #in deg
self.nside = 2
if output_path is None:
self.output_path = self.path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
else:
self.output_path = output_path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
self.dataset_path = self.path + 'datasets/' + self.dataset + '/'
self.planck_path = planck_path
self.milca_path = milca_path
self.test_regions = [[0, 360, 90, 70],
[0, 120, 70, 40], [120, 240, 70, 40], [240, 360, 70, 40],
[0, 120, 40, 18], [120, 240, 40, 18], [240, 360, 40, 18],
[0, 120, -18, -40], [120, 240, -18, -40], [240, 360, -18, -40],
[0, 120, -40, -70], [120, 240, -40, -70], [240, 360, -40, -70],
[0, 360, -70, -90]]
self.val_regions = [[0, 180, -20, -40],
[0, 180, -20, -40], [0, 180, -20, -40], [0, 180, -20, -40],
[0, 360, -40, -60], [0, 360, -40, -60], [0, 360, -40, -60],
[0, 360, 60, 40], [0, 360, 60, 40], [0, 360, 60, 40],
[0, 180, 40, 20], [0, 180, 40, 20], [0, 180, 40, 20],
[0, 180, 40, 20]]
def plot_psz2_clusters(self, healpix_path):
"""Saves plots containing patches for planck frequency maps and y-map.
Function is deprecated and will be removed in later versions.
Args:
healpix_path (str): output path for plots (deprecated).
"""
maps = self.maps
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
glon = PSZ2[1].data['GLON']
glat = PSZ2[1].data['GLAT']
freq = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', 'y-map']
for j in range(len(glon)):
fig = plt.figure(figsize=(21,14), tight_layout=False)
fig.suptitle(r'$glon=$ {:.2f} $^\circ$, $glat=$ {:.2f} $^\circ$'.format(glon[j], glat[j]), y=0.92, fontsize=20)
cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False)
coord = to_coord([glon[j], glat[j]])
result = cutsky.cut_fits(coord)
for i,nu in enumerate(freq):
ax = fig.add_subplot(3,4,1+i)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
HDU = result[i]['fits']
im = ax.imshow(HDU.data, origin="lower")
w = WCS(HDU.header)
sky = w.world_to_pixel_values(glon[j], glat[j])
segmentation = plt.Circle((sky[0], sky[1]), 2.5/1.7, color='white', alpha=0.1)
ax.add_patch(segmentation)
ax.axvline(sky[0], ymin=0, ymax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axvline(sky[0], ymin=(self.npix//2+10)/self.npix, ymax=1, color='white', linestyle='--')
ax.axhline(sky[1], xmin=0, xmax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axhline(sky[1], xmin=(self.npix//2+10)/self.npix, xmax=1, color='white', linestyle='--')
# ax.scatter(sky[0], sky[1], color='red')
ax.set_title(r'%s'%nu)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.savefig(healpix_path + 'PSZ2/PSZ2_skycut_%s.png'%j, bbox_inches='tight', transparent=False)
plt.show()
plt.close()
def create_catalogs(self, plot=False):
"""Creates the following catalogs using 'PSZ2v1.fits', 'MCXC-Xray-clusters.fits', and 'redmapper_dr8_public_v6.3_catalog.fits'
(see <NAME> 2018 for more details):
planck_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with known redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC', 'Z'
planck_no_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with unknown redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC'
MCXC_no_planck (pd.DataFrame): dataframe with the following columns for MCXC clusters:
'RA', 'DEC', 'R500', 'M500', 'Z'
RM50_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>50:
'RA', 'DEC', 'LAMBDA', 'Z'
RM30_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>30:
'RA', 'DEC', 'LAMBDA', 'Z'
Catalogs are saved in output_path + /catalogs/. Input catalogs are in planck_path.
Args:
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
df_psz2 = pd.DataFrame(data={'RA': PSZ2[1].data['RA'].tolist(), 'DEC': PSZ2[1].data['DEC'].tolist(), 'GLON': PSZ2[1].data['GLON'].tolist(), 'GLAT':PSZ2[1].data['GLAT'].tolist(),
'M500': PSZ2[1].data['MSZ'].tolist(), 'R500': PSZ2[1].data['Y5R500'].tolist(), 'REDMAPPER': PSZ2[1].data['REDMAPPER'].tolist(), 'MCXC': PSZ2[1].data['MCXC'].tolist(),
'Z': PSZ2[1].data['REDSHIFT'].tolist()})
df_psz2 = df_psz2.replace([-1, -10, -99], np.nan)
planck_no_z = df_psz2.query('Z.isnull()', engine='python')
planck_z = df_psz2.query('Z.notnull()', engine='python')
# planck_no_z = planck_no_z[['RA', 'DEC']].copy()
# planck_z = planck_z[['RA', 'DEC']].copy()
planck_no_z.to_csv(self.path + 'catalogs/planck_no-z' + '.csv', index=False)
planck_z.to_csv(self.path + 'catalogs/planck_z' + '.csv', index=False)
MCXC = fits.open(self.planck_path + 'MCXC-Xray-clusters.fits')
MCXC_skycoord = SkyCoord(ra=MCXC[1].data['RA'].tolist(), dec=MCXC[1].data['DEC'].tolist(), unit=u.degree)
MCXC_GLON = list(MCXC_skycoord.galactic.l.degree)
MCXC_GLAT = list(MCXC_skycoord.galactic.b.degree)
df_MCXC = pd.DataFrame(data={'RA': MCXC[1].data['RA'].tolist(), 'DEC': MCXC[1].data['DEC'].tolist(), 'R500': MCXC[1].data['RADIUS_500'].tolist(), 'M500': MCXC[1].data['MASS_500'].tolist(),
'GLON': MCXC_GLON, 'GLAT': MCXC_GLAT, 'Z': MCXC[1].data['REDSHIFT'].tolist()})
REDMAPPER = fits.open(self.planck_path + 'redmapper_dr8_public_v6.3_catalog.fits')
REDMAPPER_skycoord = SkyCoord(ra=REDMAPPER[1].data['RA'].tolist(), dec=REDMAPPER[1].data['DEC'].tolist(), unit=u.degree)
REDMAPPER_GLON = list(REDMAPPER_skycoord.galactic.l.degree)
REDMAPPER_GLAT = list(REDMAPPER_skycoord.galactic.b.degree)
df_REDMAPPER = pd.DataFrame(data={'RA': REDMAPPER[1].data['RA'].tolist(), 'DEC': REDMAPPER[1].data['DEC'].tolist(), 'LAMBDA': REDMAPPER[1].data['LAMBDA'].tolist(),
'GLON': REDMAPPER_GLON, 'GLAT': REDMAPPER_GLAT, 'Z': REDMAPPER[1].data['Z_SPEC'].tolist()})
df_REDMAPPER_30 = df_REDMAPPER.query("LAMBDA > 30")
df_REDMAPPER_50 = df_REDMAPPER.query("LAMBDA > 50")
ACT = fits.open(self.planck_path + 'sptecs_catalog_oct919_forSZDB.fits')
SPT = fits.open(self.planck_path + 'DR5_cluster-catalog_v1.1_forSZDB.fits')
df_act = pd.DataFrame(data={'RA': list(ACT[1].data['RA']), 'DEC': list(ACT[1].data['DEC']), 'GLON': list(ACT[1].data['GLON']), 'GLAT': list(ACT[1].data['GLAT'])})
df_spt = pd.DataFrame(data={'RA': list(SPT[1].data['RA']), 'DEC': list(SPT[1].data['DEC']), 'GLON': list(SPT[1].data['GLON']), 'GLAT': list(SPT[1].data['GLAT'])})
self.remove_duplicates_on_radec(df_MCXC, df_psz2, output_name='MCXC_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_30, df_psz2, output_name='RM30_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_50, df_psz2, output_name='RM50_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_act, df_psz2, output_name='ACT_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_spt, df_psz2, output_name='SPT_no_planck', plot=plot)
PSZ2.close()
        MCXC.close()
REDMAPPER.close()
ACT.close()
SPT.close()
def create_fake_source_catalog(self):
PGCC = fits.open(self.planck_path + 'HFI_PCCS_GCC_R2.02.fits')
df_pgcc = pd.DataFrame(data={'RA': list(PGCC[1].data['RA']), 'DEC': list(PGCC[1].data['DEC']), 'GLON': list(PGCC[1].data['GLON']), 'GLAT': list(PGCC[1].data['GLAT'])})
PGCC.close()
df_pgcc.to_csv(self.path + 'catalogs/' + 'PGCC' + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
bands = ['100GHz', '143GHz', '217GHz', '353GHz', '545GHz', '857GHz']
cs_100 = fits.open(self.planck_path + 'COM_PCCS_100_R2.01.fits')
cs_143 = fits.open(self.planck_path + 'COM_PCCS_143_R2.01.fits')
cs_217 = fits.open(self.planck_path + 'COM_PCCS_217_R2.01.fits')
cs_353 = fits.open(self.planck_path + 'COM_PCCS_353_R2.01.fits')
cs_545 = fits.open(self.planck_path + 'COM_PCCS_545_R2.01.fits')
cs_857 = fits.open(self.planck_path + 'COM_PCCS_857_R2.01.fits')
df_cs_100 = pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})
df_cs_100.to_csv(self.path + 'catalogs/' + 'cs_100' + '.csv', index=False)
df_cs_143 = pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})
df_cs_143.to_csv(self.path + 'catalogs/' + 'cs_143' + '.csv', index=False)
df_cs_217 = pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})
df_cs_217.to_csv(self.path + 'catalogs/' + 'cs_217' + '.csv', index=False)
df_cs_353 = pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})
df_cs_353.to_csv(self.path + 'catalogs/' + 'cs_353' + '.csv', index=False)
df_cs_545 = pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})
df_cs_545.to_csv(self.path + 'catalogs/' + 'cs_545' + '.csv', index=False)
df_cs_857 = pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})
df_cs_857.to_csv(self.path + 'catalogs/' + 'cs_857' + '.csv', index=False)
freq = 0
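        # 'freq' is built up as a bit mask of the selected bands
        # (100GHz=2, 143GHz=4, 217GHz=8, 353GHz=16, 545GHz=32, 857GHz=64)
        # and is only used to tag the output catalogue file name below.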
if '100GHz' in bands:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in bands:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in bands:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in bands:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in bands:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in bands:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
for L in range(1, len(bands)):
for subset in tqdm(itertools.combinations(bands, L)):
freq = 0
if '100GHz' in subset:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in subset:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in subset:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in subset:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in subset:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in subset:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
cs_100.close()
cs_143.close()
cs_217.close()
cs_353.close()
cs_545.close()
cs_857.close()
def remove_duplicates_on_radec(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=5, plot=False):
""""Takes two different dataframes with columns 'RA' & 'DEC' and performs a spatial
coordinate match with a tol=5 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 5.
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_main.query("ismatched == False", inplace=True)
df_main.drop(columns=['ismatched', 'ID'], inplace=True)
df_main = df_main.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
elif with_itself == False:
assert df_with_dup is not None
ID = np.arange(0, len(df_with_dup))
df_with_dup = df_with_dup[['RA', 'DEC']].copy()
df_with_dup.insert(loc=0, value=ID, column='ID')
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
pcatalog_sub = SkyCoord(ra=df_with_dup['RA'].values, dec=df_with_dup['DEC'].values, unit='deg')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_with_dup.drop(columns=['RA', 'DEC'], inplace=True)
df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1)
df_wo_dup.query("ismatched == False", inplace=True)
df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True)
df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
df_main = df_wo_dup.copy()
if plot == True and output_name is not None:
fig = plt.figure(figsize=(8,8), tight_layout=False)
ax = fig.add_subplot(111)
ax.set_facecolor('white')
ax.grid(True, color='grey', lw=0.5)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20)
ax.set_ylabel(output_name, fontsize=20)
ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400)
ax.axvline(tol, color='k', linestyle='--')
ax.set_xlim(0, 2*tol)
plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False)
plt.show()
plt.close()
return df_main
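    # Usage sketch (assumed call pattern; 'maker' and the dataframes below are illustrative only):
    #     df_mcxc_clean = maker.remove_duplicates_on_radec(df_MCXC, df_psz2,
    #                                                      output_name='MCXC_no_planck', plot=False)
    #     df_self_clean = maker.remove_duplicates_on_radec(df_compact_sources, with_itself=True, tol=2)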
def remove_duplicates_on_lonlat(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=2, plot=False):
""""Takes two different dataframes with columns 'GLON' & 'GLAT' and performs a spatial
coordinate match with a tol=2 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 2.
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = | pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import os
import datetime
import time
import random
import pandas as pd
import numpy as np
import re
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm
from transformers import (
AdamW,
GPT2LMHeadModel,
GPT2Tokenizer,
GPT2Config,
get_linear_schedule_with_warmup
)
class WebNLGNoSeq2Seq(Dataset):
def __init__(self, tokenizer, max_source_length,
max_target_length, type_path):
super().__init__()
self.tokenizer = tokenizer
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.input_ids = []
self.attn_masks = []
self._build(type_path)
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
return self.input_ids[index], self.attn_masks[index]
def _build(self, type_path):
if type_path == 'train':
df = pd.read_csv('Datasets/webnlg_train.csv')
elif type_path == 'eval':
df = pd.read_csv('Datasets/webnlg_dev.csv')
else:
df = pd.read_csv('Datasets/webnlg_test.csv')
# n = 1
# df = df.head(int(len(df)*(n/100)))
for index, row in df.iterrows():
line = row['input_text']
target = row['target_text']
encodings = self.tokenizer('<|startoftext|>'+ line + ' = ' + target + '<|endoftext|>', truncation=True, max_length=self.max_source_length, padding="max_length")
self.input_ids.append(torch.tensor(encodings['input_ids']))
self.attn_masks.append(torch.tensor(encodings['attention_mask']))
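# Usage sketch (assumed setup; '<|startoftext|>' and the pad token are not part of the stock
# GPT-2 vocabulary, so they are added as special tokens here — values are illustrative only):
#     tokenizer = GPT2Tokenizer.from_pretrained(
#         "gpt2", bos_token="<|startoftext|>", eos_token="<|endoftext|>", pad_token="<|pad|>")
#     train_ds = WebNLGNoSeq2Seq(tokenizer, max_source_length=512, max_target_length=512, type_path="train")
#     train_loader = DataLoader(train_ds, sampler=RandomSampler(train_ds), batch_size=4)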
class DARTNoSeq2Seq(Dataset):
def __init__(self, tokenizer, max_source_length,
max_target_length, type_path):
super().__init__()
self.tokenizer = tokenizer
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.input_ids = []
self.attn_masks = []
self._build(type_path)
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
return self.input_ids[index], self.attn_masks[index]
def _build(self, type_path):
if type_path == 'train':
df = pd.read_csv('Datasets/dart_train.csv')
elif type_path == 'eval':
df = pd.read_csv('Datasets/dart_dev.csv')
else:
df = pd.read_csv('Datasets/dart_test.csv')
# n = 1
# df = df.head(int(len(df)*(n/100)))
for index, row in df.iterrows():
line = row['input_text']
target = row['target_text']
encodings = self.tokenizer('<|startoftext|>'+ line + ' = ' + target + '<|endoftext|>', truncation=True, max_length=self.max_source_length, padding="max_length")
self.input_ids.append(torch.tensor(encodings['input_ids']))
self.attn_masks.append(torch.tensor(encodings['attention_mask']))
class BOTHNoSeq2Seq(Dataset):
def __init__(self, tokenizer, max_source_length,
max_target_length, type_path):
super().__init__()
self.tokenizer = tokenizer
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.input_ids = []
self.attn_masks = []
self._build(type_path)
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
return self.input_ids[index], self.attn_masks[index]
def _build(self, type_path):
if type_path == 'train':
df1 = pd.read_csv('Datasets/dart_train.csv')
df2 = pd.read_csv('Datasets/webnlg_train.csv')
elif type_path == 'eval':
df1 = | pd.read_csv('Datasets/dart_dev.csv') | pandas.read_csv |
#!/usr/bin/env python3
"""
Create pandas dataframe from downloaded csv files and categorize sectors
"""
import csv
import pandas as pd
import sys
import json
import pdb
def main():
frl = []
for i in range(2, len(sys.argv)-1):
df = pd.read_csv(sys.argv[i], encoding = "ISO-8859-1")
frl.append(df)
#Create SectorID columns to categorize sectors as integers (categorization was done manually)
with open('SectorDict.json', 'r') as sd:
sector_dict = json.loads(sd.read())
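    # SectorDict.json is assumed (from the lookup below) to map an integer-like key to a list
    # of lowercase sector names, e.g. (illustrative only):
    #     {"1": ["technology", "information technology"], "2": ["financials", "banks"]}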
for df in frl:
for i in range(len(df)):
for k, v in sector_dict.items():
if df.at[i, 'Sector'].lower() in v:
df.at[i, 'SectorID'] = int(k)
df = | pd.concat(frl, sort=False) | pandas.concat |
from redisclustergrid import StrictRedisCluster
import pandas as pd
host = "10.11.153.125"
startup_nodes = [
{"host": host, "port": "7000"},
{"host": host, "port": "7001"},
{"host": host, "port": "7002"},
{"host": host, "port": "7003"},
{"host": host, "port": "7004"},
{"host": host, "port": "7005"},
]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)
df = | pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) | pandas.DataFrame |
# -----------------------------------------------------------
# <NAME>
# -----------------------------------------------------------
import streamlit as st
import pandas as pd
import numpy as np
from sodapy import Socrata
import pydeck as pdk
import plotly.express as px
import requests
# from IPython.display import Image
with open("style.css") as f:
st.markdown('<style>{}</style>'.format(f.read()), unsafe_allow_html=True)
@st.cache(persist=True)
def giphy_path():
path = "https://media.giphy.com/media/rS9tqucvXWwuY/giphy.gif"
return path
path = giphy_path()
points_table_data_url = 'https://www.iplt20.com/points-table/2020'
html = requests.get(points_table_data_url).content
df_list_points_table = | pd.read_html(html) | pandas.read_html |
from datetime import datetime
from functools import reduce
import logging
from multiprocessing import cpu_count
from multiprocessing import Pool
import os
from bs4 import BeautifulSoup
import urllib
import pandas as pd
import numpy as np
NCORES = cpu_count()
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)
def retrieve_general_data(url_page):
"""Scraping items from search page.
:param url_page: url of webpage to make the scraping.
:type url_page: str
:return: Dataframe with information of each item
:rtype: pandas.DataFrame
:Example:
>>> from scraping_details import retrieve_general_data
>>> url_page = 'https://www.infocasas.com.uy/venta/inmuebles/montevideo/pagina3'
>>> retrieve_general_data(url_page)
"""
logging.debug('%s', url_page)
url_base = '/'.join(url_page.split('/')[:3])
try:
page = urllib.request.urlopen(url_page)
except urllib.error.HTTPError as e:
print('HTTPError: {}'.format(e.code))
return pd.DataFrame([])
except urllib.error.URLError as e:
print('URLError: {}'.format(e.reason))
return pd.DataFrame([])
soup = BeautifulSoup(page, 'html.parser')
next_page = (soup.find('div', attrs={'id': 'paginado'})
.find('a', attrs={'class': 'next'}))
if next_page and (url_page < next_page.attrs['href']):
result = pd.DataFrame({})
else:
table = soup.find_all('div', attrs={'class': 'propiedades-slider'})
neighborhood = [
[k.text for k in p.find_all('p')] for t in table
for p in t.find_all('div')
if 'singleLineDots' in p['class']
]
price = [p.text.split()[-1] for t in table
for p in t.find_all('div') if 'precio' in p['class']]
desc = [[k.text for k in p.find_all('p')] for t in table
for p in t.find_all('div') if
'inDescription' in p['class']]
desc = [k[0] for k in desc]
details = [[d.find_all('span')[0].text for d in p.find_all('div')]
for t in table for p in t.find_all('div')
if 'contentIcons' in p['class']]
details = pd.DataFrame(details,
columns=['rooms', 'bathrooms', 'area_m2'])
data_id = [k.get('data-id', '') for k in table]
data_idproject = [k.get('data-idproyecto', '') for k in table]
link = [url_base + k.find('a')['href'] for k in table]
proyecto_label = [
k.find(class_='proyectoLabel').get_text() if k.find(
class_='proyectoLabel') else None for k in table]
df = pd.DataFrame(neighborhood, columns=['neighborhood', 'type'])
df['price'] = price
df['desc'] = desc
df['url'] = link
df['id'] = data_id
df['idproject'] = data_idproject
df['project_label'] = proyecto_label
df['page'] = url_page
result = pd.concat([details, df], axis=1)
return result
def retrieve_property_details(url_page):
"""Scraping details of a item from its web page.
:param url_page: url of webpage to make the scraping.
:type url_page: str
:return: Dataframe with information of each item
:rtype: pandas.DataFrame
:Example:
>>> from scraping_details import retrieve_property_details
>>> url_page = 'https://www.infocasas.com.uy/venta-edificio-o-local-jacinto-vera-ideal-colegios-o-empresa-1554m/185865494?v'
>>> retrieve_property_details(url_page)
"""
logging.debug('%s', url_page)
try:
page = urllib.request.urlopen(url_page)
except urllib.error.HTTPError as e:
print('HTTPError: {}'.format(e.code))
return pd.Series({'uri': url_page})
except urllib.error.URLError as e:
print('URLError: {}'.format(e.reason))
return pd.Series({'uri': url_page})
soup = BeautifulSoup(page, 'html.parser')
ficha_tecnica = soup.find_all(class_='ficha-tecnica')
amenities = soup.find_all(id='amenities')
description = soup.find(id='descripcion')
agency = soup.find('p', class_='titulo-inmobiliaria')
price = soup.find('p', class_='precio-final')
title = soup.find('h1', class_='likeh2 titulo one-line-txt')
kind = soup.find('p', class_='venta')
# visitation = soup.find_all(class_='allContentVisitation')
details = {item.find('p').get_text()[:-1].replace(' ', '_'): item.find('div').get_text()
for item in ficha_tecnica[0].find_all(class_='lista')} if ficha_tecnica else {}
details['extra'] = ','.join(
[key.find('p').get_text() for key in amenities[0].find_all(class_='lista active')]) if amenities else ''
details['descripcion'] = '. '.join([p.get_text() for p in description.find_all('p')]) if description else ''
details['url'] = url_page
details['inmobiliaria'] = agency.get_text() if agency else ''
details['precio'] = price.get_text() if price else ''
details['titulo_publicacion'] = title.get_text() if title else ''
details['tipo_propiedad'] = kind.get_text() if kind else ''
return pd.Series(details)
def pool_property_details(urls, outputfile):
with Pool(NCORES) as p:
result = [p.apply_async(retrieve_property_details, (url,))
for url in urls]
result = [k.get() for k in result]
df_result = reduce(lambda x, y: pd.concat([x, y], axis=1, sort=True),
result).transpose().reset_index(drop=True)
df_result.to_csv(outputfile, index=False)
def pool_general_data(urls, outputfile):
with Pool(NCORES) as p:
result = [p.apply_async(retrieve_general_data, (url,)) for url in urls]
result = [k.get() for k in result]
df_result = reduce(lambda x, y: pd.concat([x, y], axis=0, sort=True),
result).reset_index(drop=True)
if not df_result.empty:
df_result.to_csv(outputfile, index=False)
is_df_valid = 1
else:
        is_df_valid = 0  # stop the page sweep once an empty result page is reached
return is_df_valid
def generate_raw_dataset(url_base='https://www.infocasas.com.uy',
search_path='/venta/inmuebles/montevideo/',
output_file='raw_home_for_sale_dataset',
):
basepath = os.path.dirname(os.path.realpath(__file__))
output_path = os.path.join(
os.sep.join(basepath.split(os.sep)[:-1]), 'data/raw/')
suffix = datetime.today().strftime('%Y-%m-%d')
output_file = '{}_{}'.format(output_file, suffix)
tmp_path = os.path.join(output_path, 'details_rent/')
output_file_tmp = tmp_path + output_file + '_{idx}.csv'
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
is_valid_range = 1
idx = 0
while is_valid_range:
subset = np.arange(idx * 10, (idx+1) * 10)
# for idx, subset in enumerate(np.array_split(np.arange(955), 100)):
        output_file = output_file_tmp.format(idx=idx)
web_page = url_base + search_path + 'pagina{}'
urls = [web_page.format(k) for k in subset]
is_valid_range = pool_general_data(urls, output_file)
idx += 1
dfs = [pd.read_csv(tmp_path+key) for key in os.listdir(tmp_path)]
output = reduce(lambda x, y: | pd.concat([x, y], sort=True) | pandas.concat |