prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
# -*- coding: utf-8 -*-
"""
Functions to save the laminate design set-up
- save_objective_function_BELLA:
saves the objective function parameters on Sheet [Objective function]
- save_multipanel:
saves the data of the multipanel structure:
- panel geometry
- panel thickness targets
- panel lamination parameter targets
- lamination parameter first-level sensitivities
- boundaries across panels
- save_constraints_BELLA:
saves the design and manufacturing constraints on Sheet [Constraints]
- save_parameters_BELLA:
saves the optimiser parameters on Sheet [Parameters]
- save_materials:
saves the material properties on Sheet [Materials]
"""
import sys
import numpy as np
import pandas as pd
sys.path.append(r'C:\BELLA')
from src.divers.excel import append_df_to_excel
from src.CLA.lampam_functions import calc_lampam
from src.BELLA.format_pdl import convert_sst_to_ss
from src.guidelines.ipo_oopo import calc_penalty_ipo_oopo_mp
from src.guidelines.contiguity import calc_penalty_contig_mp
from src.guidelines.disorientation import calc_number_violations_diso_mp
from src.guidelines.ten_percent_rule import calc_penalty_10_ss
from src.guidelines.ply_drop_spacing import calc_penalty_spacing
from src.buckling.buckling import buckling_factor
def save_materials(filename, materials):
"""
saves the material properties on Sheet [Materials]
"""
table_mat = pd.DataFrame()
table_mat.loc[0, 'E11'] = materials.E11
table_mat.loc[0, 'E22'] = materials.E22
table_mat.loc[0, 'G12'] = materials.G12
table_mat.loc[0, 'nu12'] = materials.nu12
table_mat.loc[0, 'nu21'] = materials.nu21
table_mat.loc[0, 'areal density'] = materials.density_area
table_mat.loc[0, 'volumic density'] = materials.density_volume
table_mat.loc[0, 'ply thickness'] = materials.ply_t
table_mat.loc[0, 'Q11'] = materials.Q11
table_mat.loc[0, 'Q12'] = materials.Q12
table_mat.loc[0, 'Q22'] = materials.Q22
table_mat.loc[0, 'Q66'] = materials.Q66
table_mat.loc[0, 'U1'] = materials.U1
table_mat.loc[0, 'U2'] = materials.U2
table_mat.loc[0, 'U3'] = materials.U3
table_mat.loc[0, 'U4'] = materials.U4
table_mat.loc[0, 'U5'] = materials.U5
table_mat = table_mat.transpose()
append_df_to_excel(
filename, table_mat, 'Materials', index=True, header=False)
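# Hypothetical usage sketch (not part of the original code base): `materials` is
# assumed to be an object exposing the attributes read above (E11, E22, ...), and
# the target workbook is assumed to be writable by append_df_to_excel:
#   save_materials('BELLA_setup.xlsx', materials)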
def save_multipanel(
filename, multipanel, obj_func_param, sst=None,
calc_penalties=False, constraints=None, mat=None, save_buckling=False):
"""
saves the data of the multipanel structure:
- panel geometry
- panel thickness targets
- panel lamination-parameter targets
- lamination parameter first-level sensitivities
- boundaries across panels
- constraints: design guidelines
- sst: stacking sequence table
"""
table_mp = | pd.DataFrame() | pandas.DataFrame |
# Futu Algo: Algorithmic High-Frequency Trading Framework
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by <NAME> <<EMAIL>>, 2021
# Copyright (c) billpwchan - All Rights Reserved
import csv
import glob
import json
import os
import sqlite3
from datetime import datetime, timedelta
from multiprocessing import Pool, cpu_count
from pathlib import Path
import humanize
import openpyxl
import pandas as pd
import requests
import yfinance as yf
from util import logger
from util.global_vars import *
class DatabaseInterface:
def __init__(self, database_path):
Path("./database/").mkdir(parents=True, exist_ok=True)
self.conn = sqlite3.connect(database_path)
self.cur = self.conn.cursor()
def execute(self, query, data):
self.cur.execute(query, data)
return self.cur
def commit(self):
self.conn.commit()
def get_stock_list(self) -> list:
self.cur.execute('SELECT DISTINCT code FROM stock_data')
return [item[0] for item in self.cur.fetchall()]
def add_stock_data(self, code, time_key, open, close, high, low, pe_ratio, turnover_rate, volume, turnover,
change_rate, last_close, k_type):
# if self.check_stock_data_exist(code, time_key, k_type):
return self.execute(
"INSERT OR IGNORE INTO stock_data VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(None, code, time_key, open, close, high, low, pe_ratio, turnover_rate, volume, turnover, change_rate,
last_close,
k_type)
)
def add_stock_pool(self, date, filter, code):
return self.execute("INSERT OR IGNORE INTO stock_pool VALUES(?, ?, ?, ?)", (None, date, filter, code))
def get_stock_pool(self, date, filter):
# NOT FINISHED YET.
self.cur.execute("SELECT date, filter, code FROM stock_pool WHERE date=? and filter=?", (date, filter))
def add_stock_info(self, code, name):
return self.execute("INSERT OR IGNORE INTO stock_info VALUES(?, ?, ?)", (None, code, name))
def delete_stock_pool_from_date(self, date):
return self.execute("DELETE FROM stock_pool WHERE date=?", (date,))
def __del__(self):
""" Destroys instance and connection on completion of called method """
self.conn.close()
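# Minimal usage sketch (hypothetical database path and stock name; assumes the
# stock_data, stock_pool and stock_info tables already exist in the SQLite file):
#   db = DatabaseInterface('./database/stock_data.sqlite')
#   codes = db.get_stock_list()
#   db.add_stock_info('HK.00001', 'Example Holdings')
#   db.commit()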
class DataProcessingInterface:
default_logger = logger.get_logger("data_processing")
@staticmethod
def get_1M_data_range(date_range: list, stock_list: list) -> dict:
"""
Get 1M Data from CSV based on Stock List. Returned in Dict format
:param date_range: A list of Date in DateTime Format (YYYY-MM-DD)
:param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])
:return: Dictionary in Format {'HK.00001': pd.DataFrame, 'HK.00002': pd.DataFrame}
"""
output_dict = {}
for stock_code in stock_list:
# input_df holds all the 1M data from start_date to end_date in pd.DataFrame format
input_df = pd.concat(
[pd.read_csv(f'./data/{stock_code}/{stock_code}_{input_date}_1M.csv', index_col=None) for input_date in
date_range if
Path(f'./data/{stock_code}/{stock_code}_{input_date}_1M.csv').exists() and (not pd.read_csv(
f'./data/{stock_code}/{stock_code}_{input_date}_1M.csv').empty)],
ignore_index=True)
input_df[['open', 'close', 'high', 'low']] = input_df[['open', 'close', 'high', 'low']].apply(pd.to_numeric)
input_df.sort_values(by='time_key', ascending=True, inplace=True)
output_dict[stock_code] = output_dict.get(stock_code, input_df)
return output_dict
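# Illustrative call (assumes ./data/HK.00001/HK.00001_<date>_1M.csv files exist):
#   frames = DataProcessingInterface.get_1M_data_range(
#       date_range=['2021-06-01', '2021-06-02'], stock_list=['HK.00001'])
#   frames['HK.00001'].head()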
@staticmethod
def get_custom_interval_data(target_date: datetime, custom_interval: int, stock_list: list) -> dict:
"""
Get 5M/15M/Other Customized-Interval Data from CSV based on Stock List. Returned in Dict format
:param target_date: Date in DateTime Format (YYYY-MM-DD)
:param custom_interval: Customized-Interval in unit of "Minutes"
:param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])
:return: Dictionary in Format {'HK.00001': pd.DataFrame, 'HK.00002': pd.DataFrame}
"""
input_data = {}
for stock_code in stock_list:
input_path = f'./data/{stock_code}/{stock_code}_{str(target_date)}_1M.csv'
if not Path(input_path).exists():
continue
input_csv = pd.read_csv(input_path, index_col=None)
# Non-Trading Day -> Skip
if input_csv.empty:
continue
# Set Time-key as Index & Convert to Datetime
input_csv = input_csv.set_index('time_key')
input_csv.index = pd.to_datetime(input_csv.index, infer_datetime_format=True)
# Define Function List
agg_list = {
"code": "first",
"open": "first",
"close": "last",
"high": "max",
"low": "min",
"pe_ratio": "last",
"turnover_rate": "sum",
"volume": "sum",
"turnover": "sum",
}
# Group from 09:31:00 with Freq = custom_interval minutes (e.g. 5 Min)
minute_df = input_csv.groupby(pd.Grouper(freq=f'{custom_interval}Min', closed='left', offset='1min')).agg(
agg_list)[1:]
# For 1min -> 5min, need to add Timedelta of 4min
minute_df.index = minute_df.index + pd.Timedelta(minutes=int(custom_interval - 1))
# Drop Lunch Time
minute_df.dropna(inplace=True)
# Update First Row (Special Cases) e.g. For 1min -> 5min, need to use the first 6min Rows of data
minute_df.iloc[0] = \
input_csv.iloc[:(custom_interval + 1)].groupby('code').agg(agg_list).iloc[0]
# Update Last Close Price
last_index = minute_df.index[0]
minute_df['change_rate'] = 0
minute_df['last_close'] = input_csv['last_close'][0]
minute_df.loc[last_index, 'change_rate'] = 100 * (float(minute_df.loc[last_index, 'close']) - float(
minute_df.loc[last_index, 'last_close'])) / float(minute_df.loc[last_index, 'last_close'])
# Change Rate = (Close Price - Last Close Price) / Last Close Price * 100
# Last Close = Previous Close Price
for index, row in minute_df[1:].iterrows():
minute_df.loc[index, 'last_close'] = minute_df.loc[last_index, 'close']
minute_df.loc[index, 'change_rate'] = 100 * (
float(row['close']) - float(minute_df.loc[last_index, 'close'])) / float(
minute_df.loc[last_index, 'close'])
last_index = index
minute_df.reset_index(inplace=True)
column_names = json.loads(config.get('FutuOpenD.DataFormat', 'HistoryDataFormat'))
minute_df = minute_df.reindex(columns=column_names)
# Convert Timestamp type column to standard String format
minute_df['time_key'] = minute_df['time_key'].dt.strftime('%Y-%m-%d %H:%M:%S')
input_data[stock_code] = input_data.get(stock_code, minute_df)
return input_data
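# Illustrative call producing 5-minute bars (assumes the corresponding 1M CSV file
# exists for the chosen date and that `config` defines FutuOpenD.DataFormat/HistoryDataFormat):
#   bars = DataProcessingInterface.get_custom_interval_data(
#       target_date=datetime(2021, 6, 1).date(), custom_interval=5,
#       stock_list=['HK.00001'])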
@staticmethod
def validate_1M_data(date_range: list, stock_list: list, trading_days: dict):
raise NotImplementedError
# TODO: Validate data against futu records
@staticmethod
def check_empty_data(input_file: str):
input_csv = | pd.read_csv(input_file, index_col=None) | pandas.read_csv |
import os, glob
import pandas as pd
def selectX(df_dict=df_dict, ids=ids, x=100):
selectX = | pd.DataFrame(columns=df_dict[ids[0]].columns) | pandas.DataFrame |
# parser.py - Special parser for reading and writing *.tsv files with pandas.
# Import pandas library for parsing dataframes.
import pandas as pd
# For parsing MFI tables specifically.
def read_mfi(path, title="Metric", countries=['AFG', 'JPN']):
"""Special parser for reading an MFI table.
:param path: Path to the table to parse.
:param title: fieldname to assign the value column, defaults to 'Metric'
:param countries: list of ISO alpha-3 country codes to keep, defaults to ['AFG', 'JPN']
:return: DataFrame filtered to the requested countries, sorted by code and year.
"""
# Parse the MFI table.
df = read_tsv(path)
# Rename the columns.
df.columns = [ "Code", "Country", "Year", title ]
# Select only the entries that have matching codes.
df = df[df["Code"].isin(countries)]
# Add 'Time' column with DataTime type values.
# df['Time'] = pd.to_datetime(df['Year'], format="%Y")
# Create a MultiIndex in the pd.DataFrame.
# df = df.set_index(['Code', 'Time'], drop=True)
# Sort by country and year.
df = df.sort_values(by=['Code', 'Year'], ascending=True)
# Retain the old index.
df = df.reset_index(drop=False)
# Rename the index category.
df = df.rename(columns={'index': 'original_index'})
# Return the table.
return df
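# Illustrative call (hypothetical file name): read a tab-separated MFI table,
# keep only Afghanistan and Japan, and label the value column 'GDP':
#   gdp = read_mfi('mfi_gdp.tsv', title='GDP', countries=['AFG', 'JPN'])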
def read_tsv(filepath_or_buffer, **kwargs):
"""Read a tab-separated values (tsv) file into DataFrame.
:param filepath_or_buffer: Any valid string path is acceptable. The string could be a URL.
:param **kwargs: See expected keyword arguments for pandas.read_csv()
:return: DataFrame or TextParser : Parsed file is returned as two-dimensional data structure with labeled axes.
"""
return pd.read_csv(filepath_or_buffer, **dict(kwargs, sep="\t"))
def to_tsv(data, *args, **kwargs):
"""Write object to a tab-separated values (tsv) file.
:param data: Data to write.
:param *args: See expected positional arguments for pandas.to_csv()
:param **kwargs: See expected keyword arguments for pandas.to_csv()
:return: None or str : If path_or_buf is None, returns the resulting tsv format as a string. Otherwise returns None.
"""
if isinstance(data, pd.DataFrame):
_data = data
elif isinstance(data, pd.Series):
_data = data.to_frame()
else:
_data = | pd.DataFrame(data) | pandas.DataFrame |
import os
import requests
import json
from mapLight.dirs import *
from mapLight.key import apiKey
def downloadBills(jurisdiction,session,includePositions=True,allBills=False):
params = {'jurisdiction':jurisdiction,
'session':session,
'include_organizations':int(includePositions),
'has_organizations':int(not allBills),
'apikey':apiKey}
url = r'http://classic.maplight.org/services_open_api/map.bill_list_v1.json'
result = requests.get(url,params)
result.raise_for_status()
return result.json()['bills']
if __name__ == '__main__':
if not os.path.isdir(positionsDir):
os.makedirs(positionsDir)
# Download all sessions of congress
jurisdiction = 'us'
for session in range(109,116):
print('downloading {} session {}'.format(jurisdiction,session))
bills = downloadBills(jurisdiction='us',session=session)
with open(os.path.join(positionsDir,'{}_{}.json'.format(jurisdiction,session)),'w',encoding='utf8') as f:
json.dump(bills,f)
# Compile positions.csv
import pandas as pd
billDFs = []
for jsonFile in [f for f in os.listdir(positionsDir) if f.endswith('.json')]:
with open(os.path.join(positionsDir,jsonFile),'r',encoding='utf8') as f:
bills = json.load(f)
for bill in bills:
df = pd.DataFrame(bill['organizations'])
for c in [k for k in bill.keys() if k != 'organizations']:
df[c] = bill[c]
billDFs.append(df)
positionsDF = | pd.concat(billDFs) | pandas.concat |
from datetime import datetime
from io import StringIO
import numpy
import pandas
import pytest
from hts.hierarchy import HierarchyTree
from hts.utilities.load_data import load_hierarchical_sine_data, load_mobility_data
@pytest.fixture
def events():
s = """ts,start_latitude,start_longitude,city
2019-12-06 12:29:16.789,53.565173,9.959418,hamburg
2019-12-06 12:28:37.326,50.120962,8.674268,frankfurt
2019-12-06 12:27:07.055,52.521168,13.410618,berlin
2019-12-06 12:26:25.989,51.492683,7.417612,dortmund
2019-12-06 12:25:40.222,52.537730,13.417372,berlin
2019-12-06 12:25:25.309,50.948847,6.951802,cologne
2019-12-06 12:23:53.633,48.166799,11.577420,munich
2019-12-06 12:23:05.292,50.113883,8.675192,frankfurt
2019-12-06 12:22:56.059,50.114847,8.672653,frankfurt
2019-12-06 12:22:39.471,50.943082,6.959962,cologne"""
df = pandas.read_csv(StringIO(s), index_col='ts', sep=',')
df.index = pandas.to_datetime(df.index)
return df
@pytest.fixture
def n_tree():
"""
This is the format of this tree
t 1
a b c 3
aa ab ba bb ca cb 6
aaa aab aba abb baa bab bba bbb caa cab cba cbb 12
Resulting in the summing matrix: y_t = S * b_t
t 1 1 1 1 1 1 1 1 1 1 1 1
a 1 1 1 1 0 0 0 0 0 0 0 0
b 0 0 0 0 1 1 1 1 0 0 0 0
c 0 0 0 0 0 0 0 0 1 1 1 1
aa 1 1 0 0 0 0 0 0 0 0 0 0
ab 0 0 1 1 0 0 0 0 0 0 0 0 aaa
ba 0 0 0 0 1 1 0 0 0 0 0 0 aab
bb 0 0 0 0 0 0 1 1 0 0 0 0 aba
ca 0 0 0 0 0 0 0 0 1 1 0 0 abb
cb 0 0 0 0 0 0 0 0 0 0 1 1 baa
aaa 1 0 0 0 0 0 0 0 0 0 0 0 bab
aab 0 1 0 0 0 0 0 0 0 0 0 0 bba
aba 0 0 1 0 0 0 0 0 0 0 0 0 bbb
abb 0 0 0 1 0 0 0 0 0 0 0 0 caa
baa 0 0 0 0 1 0 0 0 0 0 0 0 cab
bab 0 0 0 0 0 1 0 0 0 0 0 0 cba
bba 0 0 0 0 0 0 1 0 0 0 0 0 cbb
bbb 0 0 0 0 0 0 0 1 0 0 0 0
caa 0 0 0 0 0 0 0 0 1 0 0 0
cab 0 0 0 0 0 0 0 0 0 1 0 0
cba 0 0 0 0 0 0 0 0 0 0 1 0
cbb 0 0 0 0 0 0 0 0 0 0 0 1
"""
t = ('t', 1)
t1 = [('a', 2), ('b', 2), ('c', 3)]
t2 = [('aa', 4), ('ab', 5), ('ba', 6), ('bb', 4), ('ca', 5), ('cb', 6)]
t3 = [('aaa', 4), ('aab', 5), ('aba', 6), ('abb', 4), ('baa', 5),
('bab', 6), ('bba', 5), ('bbb', 6), ('caa', 5), ('cab', 6),
('cba', 5), ('cbb', 6)]
test_t = HierarchyTree(key=t[0], item=t[1])
for i, j in t1:
test_t.add_child(key=i, item=j)
for c in test_t.children:
for i, j in t2:
if i.startswith(c.key):
c.add_child(key=i, item=j)
for c in test_t.children:
for c2 in c.children:
for i, j in t3:
if i.startswith(c2.key):
c2.add_child(key=i, item=j)
return test_t
@pytest.fixture
def hierarchical_sine_data():
s, e = datetime(2019, 1, 15), datetime(2019, 10, 15)
return load_hierarchical_sine_data(s, e)
@pytest.fixture
def hierarchical_mv_data():
return load_mobility_data()
@pytest.fixture
def mv_tree(hierarchical_mv_data):
hier = {
'total': ['CH', 'SLU', 'BT', 'OTHER'],
'CH': ['CH-07', 'CH-02', 'CH-08', 'CH-05', 'CH-01'],
'SLU': ['SLU-15', 'SLU-01', 'SLU-19', 'SLU-07', 'SLU-02'],
'BT': ['BT-01', 'BT-03'],
'OTHER': ['WF-01', 'CBD-13']
}
exogenous = {k: ['precipitation', 'temp'] for k in hierarchical_mv_data.columns if
k not in ['precipitation', 'temp']}
return HierarchyTree.from_nodes(hier, hierarchical_mv_data, exogenous=exogenous)
@pytest.fixture
def sine_hier():
return {'total': ['a', 'b', 'c'],
'a': ['aa', 'ab'], 'aa': ['aaa', 'aab'],
'b': ['ba', 'bb'],
'c': ['ca', 'cb', 'cc', 'cd']}
@pytest.fixture
def uv_tree(sine_hier, hierarchical_sine_data):
hsd = hierarchical_sine_data.resample('1H').apply(sum).head(400)
return HierarchyTree.from_nodes(sine_hier, hsd)
@pytest.fixture
def load_df_and_hier_uv(sine_hier, hierarchical_sine_data):
return hierarchical_sine_data.resample('1H').apply(sum), sine_hier
@pytest.fixture
def sample_ds():
cid = numpy.repeat([10, 500], 40)
ckind = numpy.repeat(["a", "b", "a", "b"], 20)
csort = [30, 53, 26, 35, 42, 25, 17, 67, 20, 68, 46, 12, 0, 74, 66, 31, 32,
2, 55, 59, 56, 60, 34, 69, 47, 15, 49, 8, 50, 73, 23, 62, 24, 33,
22, 70, 3, 38, 28, 75, 39, 36, 64, 13, 72, 52, 40, 16, 58, 29, 63,
79, 61, 78, 1, 10, 4, 6, 65, 44, 54, 48, 11, 14, 19, 43, 76, 7,
51, 9, 27, 21, 5, 71, 57, 77, 41, 18, 45, 37]
cval = [11, 9, 67, 45, 30, 58, 62, 19, 56, 29, 0, 27, 36, 43, 33, 2, 24,
71, 41, 28, 50, 40, 39, 7, 53, 23, 16, 37, 66, 38, 6, 47, 3, 61,
44, 42, 78, 31, 21, 55, 15, 35, 25, 32, 69, 65, 70, 64, 51, 46, 5,
77, 26, 73, 76, 75, 72, 74, 10, 57, 4, 14, 68, 22, 18, 52, 54, 60,
79, 12, 49, 63, 8, 59, 1, 13, 20, 17, 48, 34]
df = pandas.DataFrame({"id": cid, "kind": ckind, "sort": csort, "val": cval})
df = df.set_index("id", drop=False)
df.index.name = None
return df
@pytest.fixture
def visnights_hier():
return {'total': ['NSW', 'OTH', 'WAU', 'SAU', 'QLD', 'VIC'],
'NSW': ['NSW_Metro', 'NSW_NthCo', 'NSW_NthIn', 'NSW_SthCo', 'NSW_SthIn'],
'OTH': ['OTH_Metro', 'OTH_NoMet'],
'QLD': ['QLD_Cntrl', 'QLD_Metro', 'QLD_NthCo'],
'SAU': ['SAU_Coast', 'SAU_Inner', 'SAU_Metro'],
'VIC': ['VIC_EstCo', 'VIC_Inner', 'VIC_Metro', 'VIC_WstCo'],
'WAU': ['WAU_Coast', 'WAU_Inner', 'WAU_Metro']}
@pytest.fixture
def hierarchical_visnights_data():
vis_idx = | pandas.date_range(start="1998-01-01", periods=8, freq="QS") | pandas.date_range |
import cv2
import numpy as np
import pandas as pd
import shutil
from tqdm import tqdm
from pathlib import Path
from utils import get_all_files_in_folder
from sklearn.model_selection import train_test_split
def create_splits_files(root_dir, val_split, test_split):
train_dir = Path('denred0_data/train_test_split/train')
if train_dir.exists() and train_dir.is_dir():
shutil.rmtree(train_dir)
Path(train_dir).mkdir(parents=True, exist_ok=True)
train_images_dir = Path('denred0_data/train_test_split/train/images')
if train_images_dir.exists() and train_images_dir.is_dir():
shutil.rmtree(train_images_dir)
Path(train_images_dir).mkdir(parents=True, exist_ok=True)
train_masks_dir = Path('denred0_data/train_test_split/train/masks')
if train_masks_dir.exists() and train_masks_dir.is_dir():
shutil.rmtree(train_masks_dir)
Path(train_masks_dir).mkdir(parents=True, exist_ok=True)
train_masks_rgb_dir = Path('denred0_data/train_test_split/train/masks_rgb')
if train_masks_rgb_dir.exists() and train_masks_rgb_dir.is_dir():
shutil.rmtree(train_masks_rgb_dir)
Path(train_masks_rgb_dir).mkdir(parents=True, exist_ok=True)
val_dir = Path('denred0_data/train_test_split/val')
if val_dir.exists() and val_dir.is_dir():
shutil.rmtree(val_dir)
Path(val_dir).mkdir(parents=True, exist_ok=True)
val_images_dir = Path('denred0_data/train_test_split/val/images')
if val_images_dir.exists() and val_images_dir.is_dir():
shutil.rmtree(val_images_dir)
Path(val_images_dir).mkdir(parents=True, exist_ok=True)
val_masks_dir = Path('denred0_data/train_test_split/val/masks')
if val_masks_dir.exists() and val_masks_dir.is_dir():
shutil.rmtree(val_masks_dir)
Path(val_masks_dir).mkdir(parents=True, exist_ok=True)
val_masks_rgb_dir = Path('denred0_data/train_test_split/val/masks_rgb')
if val_masks_rgb_dir.exists() and val_masks_rgb_dir.is_dir():
shutil.rmtree(val_masks_rgb_dir)
Path(val_masks_rgb_dir).mkdir(parents=True, exist_ok=True)
test_dir = Path('denred0_data/train_test_split/test')
if test_dir.exists() and test_dir.is_dir():
shutil.rmtree(test_dir)
Path(test_dir).mkdir(parents=True, exist_ok=True)
test_images_dir = Path('denred0_data/train_test_split/test/images')
if test_images_dir.exists() and test_images_dir.is_dir():
shutil.rmtree(test_images_dir)
Path(test_images_dir).mkdir(parents=True, exist_ok=True)
test_masks_dir = Path('denred0_data/train_test_split/test/masks')
if test_masks_dir.exists() and test_masks_dir.is_dir():
shutil.rmtree(test_masks_dir)
Path(test_masks_dir).mkdir(parents=True, exist_ok=True)
test_masks_rgb_dir = Path('denred0_data/train_test_split/test/masks_rgb')
if test_masks_rgb_dir.exists() and test_masks_rgb_dir.is_dir():
shutil.rmtree(test_masks_rgb_dir)
Path(test_masks_rgb_dir).mkdir(parents=True, exist_ok=True)
all_masks = get_all_files_in_folder(root_dir.joinpath('masks'), ['*.png'])
labels = []
images_list = []
for msk in tqdm(all_masks):
mask = cv2.imread(str(msk), cv2.IMREAD_GRAYSCALE)
classes = np.unique(mask)
for cl in classes:
labels.append(cl)
images_list.append(msk.stem)
# classes + counts
labels_dict = | pd.DataFrame(labels, columns=["x"]) | pandas.DataFrame |
# Import the standard module for rendering pages
from django.shortcuts import render
# Import the standard modules for page pagination
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Import the module for measuring elapsed time
import time
# Import the pandas module for analysis
import pandas as pd
# Import the configuration file to access constants
from django.conf import settings
'''
Module for creating dumps. It is used only for pandas, since this module works much more slowly than numpy. VG
Example code for creating a dump:
dt = pd.read_csv('base.txt', index_col=False, delimiter=';', names=['date', 'hours', 'minutes', 'seconds', 'gap', 'grp', 'v', 'gi', 's1', 's2', 's3'])
dt.to_pickle('pandas.pickle', compression='infer')
'''
import pickle
# View that renders the home page
def index(request):
return render(request, 'home.html')
# View that renders the form page
def pandas_page(request):
return render(request, 'pandas_page.html')
# View that processes the pandas variants
def pandas_processing(request):
# Declare global variables since we will handle more than just POST requests
global end
global pandas_data
# Check the request method of the form
if request.method == "POST":
# Get the selected variant from the form
c = request.POST.get('choice', None)
# Handle variant 1
if c == 'c1':
# Declare a variable for paginating to the first page
page = request.GET.get('page', 1)
# Start the timer
start = time.time()
# Read the data from the dump
pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
# Find rows where global_active_power is greater than 5
pandas_data = pandas_data[pandas_data['gap'] > 5]
# Convert the resulting frame to a list and pass it as an argument, and set the maximum number of rows per page
paginator = Paginator(pandas_data.values.tolist(), 1000)
# Record the execution time
end = time.time() - start
try:
# Get the values for the first page
p = paginator.page(page)
# Handle exceptions for non-integer paginator values
except PageNotAnInteger:
# In that case show page 1
p = paginator.page(1)
# Handle exceptions for empty pages
except EmptyPage:
# Show only the pagination
p = paginator.page(paginator.num_pages)
# Build a dictionary with the values
context = {'frame': p, 'time': end}
# Pass the processed data to the page
return render(request, 'pandas_data.html', context)
# Handle variant 2
elif c == 'c2':
# Declare a variable for paginating to the first page
page = request.GET.get('page', 1)
# Start the timer
start = time.time()
# Read the data from the dump
pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
# Find rows where voltage is greater than 235
pandas_data = pandas_data[pandas_data['v'] > 235]
# A separate dump was created for voltage, since searching all values would require more server resources
with open(settings.VOLTAGE_DUMP, 'rb') as handle:
# Assign the value to the paginator
paginator = pickle.load(handle)
# Record the time
end = time.time() - start
try:
# Get the values for the first page
p = paginator.page(page)
# Handle exceptions for non-integer paginator values
except PageNotAnInteger:
# In that case show page 1
p = paginator.page(1)
# Handle exceptions for empty pages
except EmptyPage:
# Show only the pagination
p = paginator.page(paginator.num_pages)
# Build a dictionary with the values
context = {'frame': p, 'time': end}
# Pass the processed data to the page
return render(request, 'pandas_data.html', context)
# Handle variant 3
elif c == 'c3':
# Declare a variable for paginating to the first page
page = request.GET.get('page', 1)
# Start the timer
start = time.time()
# Read the data from the dump
pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
# Find rows where global_intensity is greater than 19 and less than 20
pandas_data = pandas_data[(pandas_data['gi'] >= 19) & (pandas_data['gi'] <= 20)]
# Convert the resulting frame to a list and pass it as an argument, and set the maximum number of rows per page
paginator = Paginator(pandas_data.values.tolist(), 1000)
# Record the time
end = time.time() - start
try:
# Get the values for the first page
p = paginator.page(page)
# Handle exceptions for non-integer paginator values
except PageNotAnInteger:
# In that case show page 1
p = paginator.page(1)
# Handle exceptions for empty pages
except EmptyPage:
# Show only the pagination
p = paginator.page(paginator.num_pages)
# Build a dictionary with the values
context = {'frame': p, 'time': end}
# Pass the processed data to the page
return render(request, 'pandas_data.html', context)
# Handle variant 4
elif c == 'c4':
# Declare a variable for paginating to the first page
page = request.GET.get('page', 1)
# Start the timer
start = time.time()
# Read the data from the dump
pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
# Draw a sample of 500000 rows
pandas_data = pandas_data.sample(n=500000, replace=True)
# Calculate the average value for group 1
s1_average = sum(pandas_data['s1'])/500000
# Calculate the average value for group 2
s2_average = sum(pandas_data['s2'])/500000
# Calculate the average value for group 3
s3_average = sum(pandas_data['s3'])/500000
# Find records after 18:00
pandas_data = pandas_data[(pandas_data['hours'] >= 18) & (pandas_data['minutes'] > 0)]
# From the previous frame, keep the rows where global_active_power is greater than 6
pandas_data = pandas_data[pandas_data['gap'] > 6]
# From the result, find all records where group 2 is greater than group 1 and group 3
pandas_data = pandas_data[(pandas_data['s2'] > pandas_data['s1']) & (pandas_data['s2'] > pandas_data['s3'])]
# Find the size of the resulting frame and split it in half
l = len(pandas_data) // 2
# Slice out the first part
first_part = pandas_data[:l]
# From the first part, take every 3rd value
first_part = first_part[::3]
# Slice out the second part
second_part = pandas_data[l:]
# From the second part, take every 4th value
second_part = second_part[::4]
# Create a list from the resulting parts
f = [first_part, second_part]
# Combine the 2 parts into 1
pandas_data = | pd.concat(f) | pandas.concat |
import pandas as pd
import numpy as np
import datetime
import os
from scipy import array
from scipy.interpolate import interp1d
def subst(x, str_re, loc):
"""
Parameters:
-----------
x : str, the string to be updated
str_re : str, the new string to replace
loc : int or numpy.array, the index of where to replace x with y
Returns:
--------
x_new : updated new string
"""
if isinstance(loc, int):
if loc == -1:
x_new = x[:loc] + str_re
else:
x_new = x[:loc] + str_re + x[loc+1:]
elif loc[-1] == -1:
x_new = x[:loc[0]] + str_re
else:
x_new = x[:loc[0]] + str_re + x[loc[1]+1:]
return x_new
# End subst()
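# Illustrative example: replace the minutes of a timestamp string with '00'
# using an index pair whose last element is -1 (trailing replacement):
#   subst('2017/01/01 12:34', '00', np.array([-2, -1]))  # -> '2017/01/01 12:00'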
def prep_cq(concentration, cons_name, discharge):
"""
Parameters:
------------
concentration : pandas.DataFrame, raw constituent concentration time series
cons_name : str, name of the constituent (used to label the concentration column)
discharge : pandas.DataFrame, discharge time series indexed by time
Returns:
------------
cq : pandas.DataFrame, concentration and discharge matched by time.
"""
concentration.rename(columns={concentration.columns[0]: 'Time',
concentration.columns[1]: '126001A-' + cons_name + '(mg/l)'}, inplace=True)
concentration.drop(index=[0, 1], inplace=True)
concentration.dropna(how='all', inplace=True, axis=1)
# Match C-Q data according to time
# Convert the string in "Time" to datetime
concentration = conc_time_shift(concentration, time_format="%Y/%m/%d %H:%M")
concentration = duplicate_average(concentration).set_index('Time')
# concentration.drop_duplicates(keep='first', inplace=True)
cq = combine_cq(concentration, discharge)
return cq
# End prep_cq()
def combine_cq(concentration, discharge):
cq = pd.concat([concentration, discharge], axis=1, join='inner')
cq.reset_index(inplace=True)
cols = cq.columns
cq.loc[:, cols[1:]] = cq.loc[:, cols[1:]].astype(float)
cq.rename(columns = {cols[-1] : 'Flow(m3)'}, inplace=True)
return cq
# End combine_cq()
def conc_time_convert(concentration, loc, time_format="%Y/%m/%d %H:%M"):
""" Assumptions: 1) If there are more than one obs C in an hour, the average of the C is used
2) the mean of flow
"""
monitor_minute = concentration['Time'][2][-2:]
if monitor_minute == '00':
concentration['Time'] = concentration['Time'].apply(subst, args=('00', loc[1]))
else:
concentration['Time'] = concentration['Time'].apply(subst, args=('00', loc[0]))
concentration['Time'] = pd.to_datetime(concentration['Time'], format=time_format)
concentration.set_index(['Time'], inplace=True)
return concentration
# End conc_time_convert()
def conc_time_shift(concentration, time_format="%Y/%m/%d %H:%M"):
"""
Shift the time for linear interpolation.
"""
concentration['Time'] = pd.to_datetime(concentration['Time'], format=time_format)
index_start = concentration.index[0]
for ii in range(index_start, len(concentration['Time'])+index_start):
minute_ii = concentration['Time'][ii].minute
if minute_ii > 30:
concentration['Time'][ii] = concentration['Time'][ii] + datetime.timedelta(minutes=(60 - minute_ii))
else:
concentration['Time'][ii] = concentration['Time'][ii] - datetime.timedelta(minutes=minute_ii)
return concentration
# End conc_time_shift()
def conc_interpolate(concentration, flow):
"""
Interpolate constituent concentration linearly and return the hourly loads.
Follow the assumptions for linear interpolation.
The index of the two dataframes follows the same format.
Instead of adding tie down, the first and the last values are treated as the added points.
"""
conc_time = concentration.index
flow = flow.loc[conc_time[0]:conc_time[-1], :]
cols_flow = flow.columns
cols_conct = concentration.columns[0]
# Interpolate concentration onto the hourly flow record
flow.loc[conc_time[0], cols_conct] = concentration.loc[conc_time[0], :].values
for ii in range(0, len(conc_time)-1):
delta_time = (conc_time[ii+1] - conc_time[ii]).total_seconds() / 3600
if delta_time > 24:
print(delta_time, conc_time[ii+1])
if delta_time == 1:
flow.loc[conc_time[ii], cols_conct] = concentration.loc[conc_time[ii], :].values
elif delta_time > 1:
x_temp = flow.loc[conc_time[ii]:conc_time[ii+1], cols_flow[0]].values
x = [x_temp[0], x_temp[-1]]
y = [concentration.loc[conc_time[ii], :].values[0], concentration.loc[conc_time[ii+1], :].values[0]]
f = interp1d(x, y)
f_x = extrap1d(f)
x_new = x_temp[1:-1]
y_new = f_x(x_new)
# import pdb; pdb.set_trace()
flow.loc[conc_time[ii]:conc_time[ii+1], cols_conct] = \
[concentration.loc[conc_time[ii], :].values[0], *y_new, concentration.loc[conc_time[ii+1], :].values[0]]
else:
raise AssertionError("The time interval is not hourly.")
flow.loc[conc_time[-1], cols_conct] = concentration.loc[conc_time[-1], :].values
flow['Loads'] = flow[cols_flow] * \
flow[cols_conct].values.reshape(flow[cols_conct].values.shape[0], 1)
return flow
# End conc_interpolate()
def emc_cal(load, discharge):
"""
Calculate EMC with \sum{l_t}/ \sum{discharge_t}
load: numpy.ndarry, load time series
discharge: numpy.ndarry, discharge time series
"""
return load.sum() / discharge.sum()
# End emc_cal()
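# Illustrative example: loads of [2.0, 4.0] over discharges of [1.0, 3.0]
# give an event mean concentration of 6.0 / 4.0 = 1.5:
#   emc_cal(np.array([2.0, 4.0]), np.array([1.0, 3.0]))  # -> 1.5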
def extrap1d(interpolator):
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if xs[0] == xs[1]:
return xs.mean()
elif x < xs[0]:
y_return = ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
if y_return > 0:
return y_return
else:
return ys.min()
elif x > xs[-1]:
return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
return interpolator(x)
def ufunclike(xs):
return list(map(pointwise, array(xs)))
return ufunclike
# End extrap1d()
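# Illustrative example: extend a two-point interpolator beyond its bounds,
# keeping the end slope for values above the last x:
#   f = interp1d([0.0, 1.0], [0.0, 10.0])
#   extrap1d(f)([2.0])  # -> [20.0]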
# define time period
def rainfall_events(filename):
events = pd.read_csv(filename, index_col='ID')
return events
# End rainfall_events()
def load_flow_index_consistent(load_flow):
# subset the conc_flow data
index_list = list(load_flow.index)
start_index = index_list.index('1/7/2017')
for ii in range(start_index, len(index_list)):
st_split = index_list[ii].split('/')
index_list[ii] = f'{st_split[-1]}/{st_split[1]}/{st_split[0]}'
load_flow.index = index_list
# load_flow.to_csv('../data/low_interp_flow.csv')
return load_flow
# End load_flow_index_consistent()
def duplicate_average(concentration):
"""
This is used to average the duplicates of concentration.
"""
duplicateCheck = concentration.duplicated(subset=['Time'], keep=False)
# import pdb; pdb.set_trace()
if concentration[duplicateCheck].shape[0] > 0:
duplicate_time = set(concentration[duplicateCheck]['Time'])
for tmp in duplicate_time:
ind = concentration[concentration.Time == tmp].index
concentration.loc[ind, concentration.columns[-1]] = \
concentration.loc[ind, concentration.columns[-1]].mean(axis=0)
concentration.drop_duplicates(keep = 'first')
return concentration
# End duplicate_average()
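# Illustrative example (hypothetical constituent column): two samples sharing a
# timestamp are both replaced by their mean in the last (concentration) column:
#   df = pd.DataFrame({'Time': ['2017/01/01 12:00'] * 2, 'NO3 (mg/l)': [1.0, 3.0]})
#   duplicate_average(df)  # both rows now carry 2.0 in 'NO3 (mg/l)'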
def hourly_cq(flow_name, childpath, outpath):
discharge = pd.read_csv(flow_name)
discharge['Time'] = | pd.to_datetime(discharge['Time'], format="%H:%M:%S %d/%m/%Y") | pandas.to_datetime |
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import datetime
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
import DES_weather_analysis
from DES_weather_analysis import clustring_kmean_forced, clustring_kmediod_PCA_operation, EPW_to_csv,solar_irradiance,solar_position
from DES_weather_analysis.solar_irradiance import aoi, get_total_irradiance
from DES_weather_analysis.solar_position import get_solarposition
JtokWh = 2.7778e-7
def kmedoid_clusters(path_test,mode):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
scenario_reduction_path= os.path.join(path_test,'ScenarioReduction')
scenarios_path = os.path.join(path_test,'ScenarioGeneration')
if not os.path.exists(scenario_reduction_path):
os.makedirs(scenario_reduction_path)
representative_days_path = scenario_reduction_path
num_scenario = 0
num_scenarios = int(editable_data['num_scenarios'])
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
altitude = float(editable_data['Altitude']) #SLC altitude m
surf_tilt = float(editable_data['solar_tilt']) #panels tilt degree
surf_azimuth = float(editable_data['solar_azimuth']) #panels azimuth degree
idf_names= []
thermal_eff_dict= {}
weight_factor={}
for i in range(int(editable_data['number_buildings'])):
if 'building_name_'+str(i+1) in editable_data.keys():
building_name = editable_data['building_name_'+str(i+1)]
idf_names.append(building_name)
thermal_eff_dict[building_name]=float(editable_data['thermal_eff_'+str(i+1)])
weight_factor[building_name]=float(editable_data['WF_'+str(i+1)])
#idf_names=idf_names[1:2]
start_year = int(editable_data['starting_year'])
end_year = int(editable_data['ending_year'])
epw_names = []
for i_temp in range(num_scenarios):
for i_solar in range(num_scenarios):
epw_names.append('T_'+str(i_temp)+'_S_'+str(i_solar))
demand_directory = os.path.join(path_test, 'IDFBuildingsFiles')
output_directory = os.path.join(path_test, 'IDFBuildingsFiles')
# epw main files
dict_EPWs = {}
list_years = []
list_tmys =[]
list_fmys = []
for year in reversed(range(start_year,end_year+1)):
weather_data = city+'_'+str(lat)+'_'+str(lon)+'_psm3_60_'+str(year)
list_years.append(weather_data)
for i in range(5):
if 'TMY'+str(i+1)+'_name' in editable_data.keys():
TMY_name = editable_data['TMY'+str(i+1)+'_name']
list_tmys.append(TMY_name)
if 'FMY'+str(i+1)+'_name' in editable_data.keys():
FMY_name = editable_data['FMY'+str(i+1)+'_name']
list_fmys.append(FMY_name)
dict_EPWs['AMYs']=list_years
dict_EPWs['FMYs']=list_fmys
dict_EPWs['TMYs']=list_tmys
global k
def scenario_reduction_per_year(scenario_genrated,name,weather_data):
global k
days= 365
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
scenario_probability = [1]*365
k = 0
#print(scenario_genrated)
for i in range(days):
data_new = scenario_genrated[i*24:(i+1)*24]
#print(data_new.keys())
data_1 = data_new['Total Electricity']
data_2 = data_new['Total Heating']
#print(data_1)
#print(name,i,k,data_1[15],data_2[15])
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[i])
k = k+1
A = np.asarray(features_scenarios_list)
#Convert the dictionary of features to Series
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
inertia_list = []
search_optimum_cluster = editable_data['Search optimum clusters'] # if I want to search for the optimum number of clusters: 1 is yes, 0 is no
cluster_range = range(2,30,1)
if search_optimum_cluster=='yes' and name== 'total_'+dict_EPWs['TMYs'][-1]+'_':
print('Defining the optimum number of clusters: ')
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (15,10)
fig, ax = plt.subplots(figsize=(15, 10))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(A_scaled)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
#ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,30,1))
plt.savefig(os.path.join(path_test, 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
print('You should enter the new optimum number of clusters in EditableFile.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A_scaled)
#kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(A_scaled)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = A_scaled[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
A_scaled_list={}
clusters={}
clusters_list = []
label_list = []
data_labels={}
data_all_labels = defaultdict(list)
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(A_scaled)):
data_all_labels[kmedoids.labels_[scenario]].append(standardization_data.inverse_transform(A_scaled[scenario].reshape(1,-1)))
#print(data_all_labels)
A_scaled_list[scenario]=A_scaled[scenario].tolist()
A_scaled_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= A_scaled_list[scenario]
label_list.append(A_scaled[scenario].tolist())
df_clusters= pd.DataFrame(clusters)
df_labels = pd.DataFrame(data_labels)
df_clusters.to_csv(os.path.join(representative_days_path , name+ 'cluster_centers_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
df_labels.to_csv(os.path.join(representative_days_path , name + 'labels_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
#Reversing PCA using two methods:
#Reversing the cluster centers using method 1 (their results are the same)
Scenario_generated_new = standardization_data.inverse_transform(kmedoids.cluster_centers_)
#print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0])
representative_day_all = {}
total_labels = []
represent_gaps = {}
scenario_data = {}
for key in filtered_label.keys():
total_labels.append(len(filtered_label[key]))
#print(len(probability_label[0])) 1990
#print(len(filtered_label[0])) 1990
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48):
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#max_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
max_heating_scenarios_nested = nested_dict()
max_electricity_scenarios_nested = nested_dict()
total_heating_scenarios = []
total_electricity_scenarios = []
max_electricity_scenarios_nested_list = defaultdict(list)
max_heating_scenarios_nested_list = defaultdict(list)
accuracy_design_day = 0.99
design_day_heating = []
design_day_electricity = []
representative_day_max = {}
electricity_design_day = {}
heating_design_day = {}
for day in range(365):
for i in range(24):
k_elect=0
list_k_electricity = []
k_heat=0
list_k_heating = []
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
if features_scenarios_nested[day][0:24][i]>electricity_demand[i]:
k_elect=1
list_k_electricity.append(k_elect)
k_elect=0
if features_scenarios_nested[day][24:48][i]>heating_demand[i]:
k_heat=1
list_k_heating.append(k_heat)
k_heat=0
if sum(list_k_electricity)==cluster_numbers: #This hour does not meet by any of the representative days
max_electricity_scenarios_nested_list[i].append(features_scenarios_nested[day][0:24][i])
total_electricity_scenarios.append(features_scenarios_nested[day][0:24][i])
if sum(list_k_heating)==cluster_numbers: #This hour does not meet by any of the representative days
max_heating_scenarios_nested_list[i].append(features_scenarios_nested[day][24:48][i])
total_heating_scenarios.append(features_scenarios_nested[day][24:48][i])
total_electricity_scenarios.sort(reverse=True)
total_heating_scenarios.sort(reverse=True)
max_electricity_hour = total_electricity_scenarios[35]
max_heating_hour = total_heating_scenarios[2]
#print(max_heating_hour,len(total_heating_scenarios),np.min(total_heating_scenarios),np.max(total_heating_scenarios))
design_day_heating = []
design_day_electricity = []
heating_dd = []
for i in range(24):
if len(max_electricity_scenarios_nested_list[i])==1:
design_day_electricity.append(max_electricity_scenarios_nested_list[i][0])
else:
try:
design_day_electricity.append(np.max([j for j in max_electricity_scenarios_nested_list[i] if j<max_electricity_hour]))
except:
design_day_electricity.append(0)
#print(i,len(max_heating_scenarios_nested_list[i]),max_heating_scenarios_nested_list[i])
if len(max_heating_scenarios_nested_list[i])==1:
heating_dd.append(max_heating_scenarios_nested_list[i][0])
design_day_heating.append(np.max(heating_dd))
else:
try:
heating_dd = [j for j in max_heating_scenarios_nested_list[i] if j<max_heating_hour]
design_day_heating.append(np.max(heating_dd))
except:
design_day_heating.append(0)
for i in range(24):
if design_day_electricity[i]==0:
if i==0:
design_day_electricity[i] = design_day_electricity[i+1]
elif i==23:
design_day_electricity[i] = design_day_electricity[i-1]
else:
design_day_electricity[i] = (design_day_electricity[i-1]+design_day_electricity[i+1])/2
if design_day_heating[i]==0:
if i==0:
design_day_heating[i] = design_day_heating[i+1]
elif i==23:
design_day_heating[i] = design_day_heating[i-1]
else:
design_day_heating[i] = (design_day_heating[i-1]+design_day_heating[i+1])/2
representative_day_max = {}
electricity_demand_total = defaultdict(list)
heating_demand_total = defaultdict(list)
heating_demand_max = {}
electricity_demand_max = {}
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
#hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760
heating_demand_max[represent]= np.mean(heating_demand)
electricity_demand_max[represent]= np.mean(electricity_demand)
high_electricity_index = []
high_heating_index = []
high_electricity_value = []
high_heating_value = []
key_max_electricity=max(electricity_demand_max, key=electricity_demand_max.get)
key_max_heating=max(heating_demand_max, key=heating_demand_max.get)
for key, value in max_electricity_scenarios_nested.items():
for inner_key, inner_value in max_electricity_scenarios_nested[key].items():
if inner_value>electricity_demand_max[key_max_electricity]:
high_electricity_index.append(scenario_number[key]*365+inner_key)
high_electricity_value.append(inner_value)
for key, value in max_heating_scenarios_nested.items():
for inner_key, inner_value in max_heating_scenarios_nested[key].items():
if inner_value>heating_demand_max[key_max_heating]:
high_heating_index.append(scenario_number[key]*365+inner_key)
high_heating_value.append(inner_value)
sum_probability.append(0.5*len(total_electricity_scenarios)/len(index_label_all)*365)
sum_probability.append(len(total_heating_scenarios)/len(index_label_all)*365)
filtered_label[cluster_numbers]=len(total_electricity_scenarios)
filtered_label[cluster_numbers+1]=len(total_heating_scenarios)
representative_day = cluster_numbers
data_represent_days_modified={'Electricity total (kWh)': design_day_electricity,
'Heating (kWh)': representative_day_max[key_max_electricity]['Heating (kWh)'],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
representative_day = cluster_numbers+1
data_represent_days_modified={'Electricity total (kWh)': representative_day_max[key_max_heating]['Electricity total (kWh)'],
'Heating (kWh)': design_day_heating,
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48): #24*5=120 features in each day
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#zmax_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name + 'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
all_representative_days = clustring_kmean_forced.kmedoid_clusters(path_test,scenario_genrated,name)[2]
represent_day = defaultdict(list)
k=0
days= 365
for represent in range(int(editable_data['Cluster numbers'])+2):
for day in range(days):
data = scenario_genrated[day*24:(day+1)*24]
data_1 = data['Total Electricity']
data_2 = data['Total Heating']
#Total electricity and heating
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
#if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0):
# print('elect',represent, day, round(all_representative_days[represent]['Electricity total (kWh)'][10],0),round(daily_list[10],0))
#if round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0):
# print('heat',represent, day, round(all_representative_days[represent]['Heating (kWh)'][6],0),round(daily_list[30],0))
if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0) and round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0) :
represent_day[represent] = day
data_temp = []
data_dni = []
data_ghi = []
data_dhi = []
data_wind_speed = []
poa_components_vector = []
poa_global = []
hour = 0
for index_in_year in range(day*24,(day+1)*24):
data_temp.append(weather_data['temp_air'].tolist()[index_in_year])
data_dni.append(weather_data['dni'].tolist()[index_in_year])
data_ghi.append(weather_data['ghi'].tolist()[index_in_year])
data_dhi.append(weather_data['dhi'].tolist()[index_in_year])
data_wind_speed.append(weather_data['wind_speed'].tolist()[index_in_year])
dti = datetime.datetime(weather_data['year'].tolist()[index_in_year], weather_data['month'].tolist()[index_in_year], weather_data['day'].tolist()[index_in_year],hour)
solar_position = get_solarposition(dti,lat, lon, altitude, pressure=None, method='nrel_numpy', temperature=12)
solar_zenith = solar_position['zenith']
solar_azimuth = solar_position['azimuth']
poa_components_vector.append(get_total_irradiance(surf_tilt, surf_azimuth,
solar_zenith[0], solar_azimuth[0],
float(weather_data['dni'].tolist()[index_in_year]), float(weather_data['ghi'].tolist()[index_in_year]), float(weather_data['dhi'].tolist()[index_in_year]), dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990'))
poa_global.append(poa_components_vector[hour]['poa_global'])
hour +=1
for represent in range(int(editable_data['Cluster numbers'])+2):
all_representative_days[represent]['temp_air']=data_temp
all_representative_days[represent]['dni']=data_dni
all_representative_days[represent]['ghi']=data_ghi
all_representative_days[represent]['dhi']=data_dhi
all_representative_days[represent]['wind_speed']=data_wind_speed
all_representative_days[represent]['gti']=poa_global
all_representative_days[represent].to_csv(os.path.join(representative_days_path,name + 'Represent_days_modified_'+str(represent)+ '.csv'), index=False)
break
return data_all_labels, represent_day
cluster_numbers= int(editable_data['Cluster numbers'])+2
temps= []
gtis=[]
for scenario in range(len(epw_names)):
#output_prefix = building_type+'_'+epw_names[scenario]+'_'
weather_path = os.path.join(scenarios_path,epw_names[scenario]+'.csv')
data = pd.read_csv(weather_path)
if scenario<10:
gtis.append(round(np.mean(data['GTI']),1))
#print(epw_names[scenario],'GTI',np.mean(data['GTI']))
if scenario%10==0:
#print(epw_names[scenario],'Temp',np.mean(data['Temperature']))
temps.append(round(np.mean(data['Temperature']),1))
print('gti', gtis)
print('temps',temps)
scenario_generated_main = defaultdict(list)
elect_buildings_main = defaultdict(list)
gas_buildings_main = defaultdict(list)
elect_annual_main = defaultdict(list)
gas_annual_main = defaultdict(list)
for building_type in idf_names:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = building_type+'_'+epw_file_name+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": | pd.Int64Dtype() | pandas.Int64Dtype |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 17:59:33 2019
@author: anna
This script computes the Director order parameter, the Deuterium Order parameters,
and the lipid tilt and splay angles.
"""
import MDAnalysis
import matplotlib.pyplot as plt
import MDAnalysis.lib.NeighborSearch as NS
import numpy as np
from numpy.linalg import norm
import os
import sys
import pandas as pd
top = 'ANALYSIS/recentered_x.gro'
traj = 'ANALYSIS/recentered_x.xtc'
side = sys.argv[1] #'up' #sys.argv[1] # "up" for upper leaflet "down" for lower leaflet
u = MDAnalysis.Universe(top,traj)
resnames = np.unique(u.select_atoms('all and name P or name O2').resnames)
def identify_leaflets(u, time_ts):
"""Assign lipids to respective leaflets"""
z = u.select_atoms("all").center_of_geometry()[2]
COM_z= np.array([0,0,z]) #defines the global midplane position along z
x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]
box = np.array([x, y, z, 90, 90, 90])
### Determining side of the bilayer CHOL belongs to in this frame
lipid1 = 'CHL'
lipid2 = 'DLIP'
lipid3 = 'SSM'
lipid4 = 'DSPC'
lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1)
lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2)
lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3)
lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)
num_lpd2 = lpd2_atoms.n_atoms
num_lpd3 = lpd3_atoms.n_atoms
num_lpd4 = lpd4_atoms.n_atoms
# atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders
# select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet
# this must be done because CHOL rapidly flip-flops between leaflets
# so we must assign CHOL to each leaflet at every time step, and in large systems
# with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it
if side == 'up':
lpd2i = lpd2_atoms[:int((num_lpd2)/2)]
lpd3i = lpd3_atoms[:int((num_lpd3)/2)]
lpd4i = lpd4_atoms[:int((num_lpd4)/2)]
lipids = lpd2i + lpd3i + lpd4i
ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)
lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm
leaflet = lpd1i + lpd2i + lpd3i + lpd4i
elif side == 'down':
lpd2i = lpd2_atoms[int((num_lpd2)/2):]
lpd3i = lpd3_atoms[int((num_lpd3)/2):]
lpd4i = lpd4_atoms[int((num_lpd4)/2):]
lipids = lpd2i + lpd3i + lpd4i #+ lpd3i
ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)
lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm
leaflet = lpd1i + lpd2i + lpd3i+ lpd4i
return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet
# =============================================================================
# Identify local normals to the lipids:
# 1- Find the local best fitting plane to each lipid and its neighbours within a cutoff
# (taking into account the first neighbour to each lipid) using SVD algorithm
# 2- Find the normal to each plane and assign it to the each lipid
def standard_fit(X):
#algorithm 3.1 from https://www.ltu.se/cms_fs/1.51590!/svd-fitting.pdf
#easy explanation at: https://towardsdatascience.com/svd-8c2f72e264f
# Find the average of points (centroid) along the columns
C = np.average(X, axis=0)
# Create CX vector (centroid to point) matrix
CX = X - C
# Singular value decomposition
U, S, V = np.linalg.svd(CX)
# The last row of V matrix indicate the eigenvectors of
# smallest eigenvalues (singular values).
N = V[-1]
return C, N
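# Illustrative sketch (added for clarity, not called by this script): sanity-check
# standard_fit() on synthetic, exactly coplanar points. The recovered normal should be
# parallel to the generating plane normal (the sign returned by the SVD is arbitrary).
def _example_standard_fit():
    rng = np.random.default_rng(0)
    xy = rng.uniform(-1, 1, size=(50, 2))
    # points on the plane z = 0.5*x - 0.2*y + 3, i.e. normal ~ (-0.5, 0.2, 1)
    pts = np.column_stack((xy, 0.5 * xy[:, 0] - 0.2 * xy[:, 1] + 3.0))
    C, N = standard_fit(pts)
    true_normal = np.array([-0.5, 0.2, 1.0])
    true_normal /= np.linalg.norm(true_normal)
    assert np.isclose(abs(np.dot(N, true_normal)), 1.0, atol=1e-6)
    return C, N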
def get_normals_CM_headgroups(head_coord, box, cutoff):
normals = np.zeros((len(head_coord),3))
indices = np.zeros((len(head_coord),1))
distarr = MDAnalysis.lib.distances.distance_array(head_coord, head_coord, box=box)
sigma_ij = np.zeros ((len(distarr), len(distarr)))
for row, i in enumerate(distarr):
for col, j in enumerate(distarr):
if distarr[row, col] < cutoff : #first neighbour
sigma_ij[row, col] = 1
else :
sigma_ij[row, col] = 0
for i in range(len(distarr)):
coords_for_plane = head_coord[sigma_ij[:,i] ==1]
C, N = standard_fit(coords_for_plane)
normals[i] = N
indices[i] = i
return np.column_stack((normals[:,0], normals[:,1], np.abs(normals[:,2]))) , indices , sigma_ij
# =============================================================================
def order_parameter(angle):
u = ((3/2)*(pow(np.cos(angle), 2))) - 0.5
return u
def head_tail_angle(head, tail, normal):
vect = head - tail
theta = np.arccos(np.sum(vect*normal, axis=1) / (norm(vect, axis =1) * norm(normal, axis=1)))
return theta
def compute_order_parameters(head, tail, normal):
theta = head_tail_angle(head, tail, normal)
u_lp = order_parameter(theta)
return u_lp
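# Illustrative sketch (added for clarity, not called by this script): the P2 order
# parameter defined above equals 1 for a vector parallel to the normal, ~0 at the
# magic angle (~54.74 deg) and -0.5 for a perpendicular vector.
def _example_order_parameter_values():
    normal = np.array([[0.0, 0.0, 1.0]])
    tail = np.zeros((1, 3))
    parallel = compute_order_parameters(np.array([[0.0, 0.0, 1.0]]), tail, normal)
    magic = compute_order_parameters(
        np.array([[np.sin(np.deg2rad(54.7356)), 0.0, np.cos(np.deg2rad(54.7356))]]), tail, normal)
    perpendicular = compute_order_parameters(np.array([[1.0, 0.0, 0.0]]), tail, normal)
    assert np.isclose(parallel[0], 1.0)
    assert np.isclose(magic[0], 0.0, atol=1e-4)
    assert np.isclose(perpendicular[0], -0.5)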
""" functions for DLIP """
def compute_directors_order_parameter_DLIP(DLIP_resid, leaflet):
head_DLIP_1 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C24').positions
tail_DLIP_1 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name 6C21 or name C216)').positions
head_DLIP_2 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C34').positions
tail_DLIP_2 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name 6C31 or name C316)').positions
u_DLIP_1 = compute_order_parameters(head_DLIP_1, tail_DLIP_1, normal_leaflet[leaflet.resnames == 'DLIP'])
u_DLIP_2 = compute_order_parameters(head_DLIP_2, tail_DLIP_2, normal_leaflet[leaflet.resnames == 'DLIP'])
return u_DLIP_1, u_DLIP_2
def compute_SCD_DLIP(DLIP_resid, leaflet, tails_DLIP_carbon):
DLIP_order_param_sn1 = []
DLIP_order_param_sn2 = []
for t, carbons in enumerate(tails_DLIP_carbon):
DLIP_Ci = u.select_atoms('resnum '+ str(DLIP_resid) + ' ' + 'and name C2%i'%carbons).positions
DLIP_HiR = u.select_atoms('resnum '+ str(DLIP_resid) + ' ' + 'and name H%iR'%carbons).positions
DLIP_Ci_sn1 = u.select_atoms('resnum '+ str(DLIP_resid) + ' ' + 'and name C3%i'%carbons).positions
DLIP_HiX = u.select_atoms('resnum '+ str(DLIP_resid) + ' ' + 'and name H%iX'%carbons).positions
DLIP_Scd_iR = compute_order_parameters(DLIP_Ci, DLIP_HiR, normal_leaflet[leaflet.resnames == 'DLIP'])
DLIP_Scd_iX = compute_order_parameters(DLIP_Ci_sn1, DLIP_HiX, normal_leaflet[leaflet.resnames == 'DLIP'])
DLIP_order_param_sn2.append(DLIP_Scd_iR)
DLIP_order_param_sn1.append(DLIP_Scd_iX)
return DLIP_order_param_sn2, DLIP_order_param_sn1
def compute_tilt_angle_DLIP(DLIP_resid, head_CM_DLIP, leaflet):
chain_A_1 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C216').positions
chain_A_2 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C217').positions
chain_A_3 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C218').positions
chain_B_1 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C316').positions
chain_B_2 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C317').positions
chain_B_3 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and name C318').positions
last_three_carbons_CM_DLIP = np.average([chain_A_1, chain_A_2, chain_A_3, chain_B_1, chain_B_2, chain_B_3], axis = 0)
tilt_angle_DLIP = head_tail_angle(head_CM_DLIP, last_three_carbons_CM_DLIP, normal_leaflet[leaflet.resnames == 'DLIP'])
return head_CM_DLIP, last_three_carbons_CM_DLIP, tilt_angle_DLIP, head_CM_DLIP - last_three_carbons_CM_DLIP
def return_coordinates_chains_DLIP(DLIP_resid):
chain_DLIP_A_1 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C26)').positions
chain_DLIP_A_2 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C27)').positions
chain_DLIP_A_3 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C28)').positions
chain_DLIP_A_4 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C29)').positions
chain_DLIP_B_1 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C36)').positions
chain_DLIP_B_2 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C37)').positions
chain_DLIP_B_3 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C38)').positions
chain_DLIP_B_4 = u.select_atoms('resnum ' + str(DLIP_resid) + ' and (name C39)').positions
#coordinates of the lipids
DLIP_C_o_G_chainA = np.average([chain_DLIP_A_1, chain_DLIP_A_2, chain_DLIP_A_3, chain_DLIP_A_4], axis = 0)
DLIP_C_o_G_chainB = np.average([chain_DLIP_B_1, chain_DLIP_B_2, chain_DLIP_B_3, chain_DLIP_B_4], axis = 0)
return DLIP_C_o_G_chainA, DLIP_C_o_G_chainB
""" functions for DSPC """
def compute_directors_order_parameter_DSPC(DSPC_resid, leaflet):
head_DSPC_1 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C24').positions
tail_DSPC_1 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name 6C21 or name C216)').positions
head_DSPC_2 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C34').positions
tail_DSPC_2 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name 6C31 or name C316)').positions
u_DSPC_1 = compute_order_parameters(head_DSPC_1,tail_DSPC_1, normal_leaflet[leaflet.resnames == 'DSPC'])
u_DSPC_2 = compute_order_parameters(head_DSPC_2,tail_DSPC_2, normal_leaflet[leaflet.resnames == 'DSPC'])
return u_DSPC_1, u_DSPC_2
def return_coordinates_chains_DSPC(DSPC_resid):
chain_DSPC_A_1 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C26)').positions
chain_DSPC_A_2 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C27)').positions
chain_DSPC_A_3 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C28)').positions
chain_DSPC_A_4 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C29)').positions
chain_DSPC_B_1 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C36)').positions
chain_DSPC_B_2 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C37)').positions
chain_DSPC_B_3 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C38)').positions
chain_DSPC_B_4 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and (name C39)').positions
#coordinates of the lipids
DSPC_C_o_G_chainA = np.average([chain_DSPC_A_1, chain_DSPC_A_2, chain_DSPC_A_3, chain_DSPC_A_4], axis = 0)
DSPC_C_o_G_chainB = np.average([chain_DSPC_B_1, chain_DSPC_B_2, chain_DSPC_B_3, chain_DSPC_B_4], axis = 0)
return DSPC_C_o_G_chainA, DSPC_C_o_G_chainB
def compute_SCD_DSPC(DSPC_resid, leaflet, tails_DSPC_carbon):
DSPC_order_param_sn1 = []
DSPC_order_param_sn2 = []
for i, carbons in enumerate(tails_DSPC_carbon):
DSPC_Ci = u.select_atoms('resnum '+ str(DSPC_resid) + ' ' + 'and name C2%i'%carbons).positions
DSPC_HiR = u.select_atoms('resnum '+ str(DSPC_resid) + ' ' + 'and name H%iR'%carbons).positions
DSPC_Ci_sn1 = u.select_atoms('resnum '+ str(DSPC_resid) + ' ' + 'and name C3%i'%carbons).positions
DSPC_HiX = u.select_atoms('resnum '+ str(DSPC_resid) + ' ' + 'and name H%iX'%carbons).positions
DSPC_Scd_iR = compute_order_parameters(DSPC_Ci, DSPC_HiR, normal_leaflet[leaflet.resnames == 'DSPC'])
DSPC_Scd_iX = compute_order_parameters(DSPC_Ci_sn1, DSPC_HiX, normal_leaflet[leaflet.resnames == 'DSPC'])
DSPC_order_param_sn2.append(DSPC_Scd_iR)
DSPC_order_param_sn1.append(DSPC_Scd_iX)
return DSPC_order_param_sn1, DSPC_order_param_sn2
def compute_tilt_angle_DSPC(DSPC_resid, head_CM_DSPC , leaflet):
#compute the TILT angle for DSPC
chain_A_1 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C216').positions
chain_A_2 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C217').positions
chain_A_3 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C218').positions
chain_B_1 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C316').positions
chain_B_2 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C317').positions
chain_B_3 = u.select_atoms('resnum ' + str(DSPC_resid) + ' and name C318').positions
last_three_carbons_CM_DSPC = np.average([chain_A_1, chain_A_2, chain_A_3, chain_B_1, chain_B_2, chain_B_3], axis = 0)
tilt_angle_DSPC = head_tail_angle(head_CM_DSPC, last_three_carbons_CM_DSPC, normal_leaflet[leaflet.resnames == 'DSPC'])
return head_CM_DSPC, last_three_carbons_CM_DSPC, tilt_angle_DSPC , head_CM_DSPC - last_three_carbons_CM_DSPC
""" functions for SSM """
def compute_directors_order_parameter_SSM(SSM_resid, leaflet):
head_SSM_1 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C6S').positions
tail_SSM_1 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C16S)').positions
head_SSM_2 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C6F').positions
tail_SSM_2 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C16F )').positions
u_SSM_1 = compute_order_parameters(head_SSM_1,tail_SSM_1, normal_leaflet[leaflet.resnames == 'SSM'])
u_SSM_2 = compute_order_parameters(head_SSM_2,tail_SSM_2, normal_leaflet[leaflet.resnames == 'SSM'])
return u_SSM_1, u_SSM_2
def return_coordinates_chains_SSM(SSM_resid):
chain_SSM_A_1 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C8S)').positions
chain_SSM_A_2 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C9S)').positions
chain_SSM_A_3 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C10S)').positions
chain_SSM_A_4 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C11S)').positions
chain_SSM_B_1 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C8F)').positions
chain_SSM_B_2 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C9F)').positions
chain_SSM_B_3 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C10F)').positions
chain_SSM_B_4 = u.select_atoms('resnum ' + str(SSM_resid) + ' and (name C11F)').positions
#coordinates of the lipids
SSM_C_o_G_chainA = np.average([chain_SSM_A_1, chain_SSM_A_2, chain_SSM_A_3, chain_SSM_A_4], axis = 0)
SSM_C_o_G_chainB = np.average([chain_SSM_B_1, chain_SSM_B_2, chain_SSM_B_3, chain_SSM_B_4], axis = 0)
return SSM_C_o_G_chainA, SSM_C_o_G_chainB
def compute_SCD_SSM(SSM_resid, leaflet, tails_SSM_carbon):
SSM_order_param_sn1 = []
SSM_order_param_sn2 = []
for i, carbons in enumerate(tails_SSM_carbon):
SSM_Ci = u.select_atoms('resnum '+ str(SSM_resid) + ' ' + 'and name C%iS'%carbons).positions
SSM_HiR = u.select_atoms('resnum '+ str(SSM_resid) + ' ' + 'and name H%iS'%carbons).positions
SSM_Ci_sn1 = u.select_atoms('resnum '+ str(SSM_resid) + ' ' + 'and name C%iF'%carbons).positions
SSM_HiX = u.select_atoms('resnum '+ str(SSM_resid) + ' ' + 'and name H%iS'%carbons).positions
SSM_Scd_iR = compute_order_parameters(SSM_Ci, SSM_HiR, normal_leaflet[leaflet.resnames == 'SSM'])
SSM_Scd_iX = compute_order_parameters(SSM_Ci_sn1, SSM_HiX, normal_leaflet[leaflet.resnames == 'SSM'])
SSM_order_param_sn2.append(SSM_Scd_iR)
SSM_order_param_sn1.append(SSM_Scd_iX)
return SSM_order_param_sn1, SSM_order_param_sn2
def compute_tilt_angle_SSM(SSM_resid, head_CM_SSM, leaflet):
# take atoms to define the CM of teh headgroups. These will be used to find the local normals
#compute the TILT angle for SSM
chain_A_1 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C16S').positions
chain_A_2 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C17S').positions
chain_A_3 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C18S').positions
chain_B_1 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C16F').positions
chain_B_2 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C17F').positions
chain_B_3 = u.select_atoms('resnum ' + str(SSM_resid) + ' and name C18F').positions
last_three_carbons_CM_SSM = np.average([chain_A_1, chain_A_2, chain_A_3, chain_B_1, chain_B_2, chain_B_3], axis = 0)
tilt_angle_SSM = head_tail_angle(head_CM_SSM, last_three_carbons_CM_SSM, normal_leaflet[leaflet.resnames == 'SSM'])
return head_CM_SSM, last_three_carbons_CM_SSM, tilt_angle_SSM , head_CM_SSM - last_three_carbons_CM_SSM
""" Compute splay angle """
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
import numpy.linalg as la
def compute_angle(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'
The sign of the angle is dependent on the order of v1 and v2
so acos(norm(dot(v1, v2))) does not work and atan2 has to be used, see:
https://stackoverflow.com/questions/21483999/using-atan2-to-find-angle-between-two-vectors
"""
cosang = np.dot(v1, v2)
sinang = la.norm(np.cross(v1, v2))
angle = np.arctan2(sinang, cosang)
return angle
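# Illustrative sketch (added for clarity, not called by this script): compute_angle()
# returns the unsigned angle in radians, e.g. pi/2 between the x and y unit axes and
# 0 for identical vectors.
def _example_compute_angle():
    x_axis = np.array([1.0, 0.0, 0.0])
    y_axis = np.array([0.0, 1.0, 0.0])
    assert np.isclose(compute_angle(x_axis, y_axis), np.pi / 2)
    assert np.isclose(compute_angle(x_axis, x_axis), 0.0)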
def compute_splays(first_neighbors_splay, t, all_tilts_vect_upper):
angles_splay = np.zeros(( len(first_neighbors_splay[0]), 4))
t = np.full(len(first_neighbors_splay[0]), t)
for i in range(len(first_neighbors_splay[0])):
angles_splay[i, :] = compute_angle(all_tilts_vect_upper[first_neighbors_splay[0][i]], all_tilts_vect_upper[first_neighbors_splay[1][i]]), first_neighbors_splay[0][i], first_neighbors_splay[1][i], t[i]
return angles_splay
def main(leaflet, COM_z, box, lipid1, lipid2,lipid3, lipid4,
DLIP_resid, DSPC_resid, SSM_resid, CHL_resid,
head_CHL_1, head_CM_lipid2, head_CM_lipid3, head_CM_lipid4,
tails_DLIP_carbon, tails_DSPC_carbon, tails_SSM_carbon):
""" DLIP """
#DLIP_resid =' '.join(str(x) for x in DLIP_per_leaflet.resnums[:] )
# compute the director order parameter
u_DLIP_1, u_DLIP_2 = compute_directors_order_parameter_DLIP(DLIP_resid, leaflet)
DLIP_C_o_G_chainA, DLIP_C_o_G_chainB = return_coordinates_chains_DLIP(DLIP_resid)
#compute the tilt angles (according to the definition of https://doi.org/10.1021/ct400492e)
head_CM_DLIP, last_three_carbons_CM_DLIP, tilt_angle_DLIP, tilt_vect_DLIP = compute_tilt_angle_DLIP(DLIP_resid, head_CM_lipid2, leaflet)
#compute the SCD order parameter
DLIP_order_param_sn2, DLIP_order_param_sn1 = compute_SCD_DLIP(DLIP_resid, leaflet, tails_DLIP_carbon)
""" SSM """
#compute the director order parameter for SSM
u_SSM_1, u_SSM_2 = compute_directors_order_parameter_SSM(SSM_resid, leaflet)
SSM_C_o_G_chainA, SSM_C_o_G_chainB = return_coordinates_chains_SSM(SSM_resid)
#compute the SCD order
SSM_order_param_sn1, SSM_order_param_sn2 = compute_SCD_SSM(SSM_resid, leaflet, tails_SSM_carbon)
#compute the tilt angles (according to the definition of https://doi.org/10.1021/ct400492e)
head_CM_SSM, last_three_carbons_CM_SSM, tilt_angle_SSM, tilt_vect_SSM = compute_tilt_angle_SSM(SSM_resid,head_CM_lipid3, leaflet)
""" DSPC """
#compute the director order parameter for DSPC
u_DSPC_1, u_DSPC_2 = compute_directors_order_parameter_DSPC(DSPC_resid, leaflet)
DSPC_C_o_G_chainA, DSPC_C_o_G_chainB = return_coordinates_chains_DSPC(DSPC_resid)
#compute the SCD order
DSPC_order_param_sn1, DSPC_order_param_sn2 = compute_SCD_DSPC(DSPC_resid, leaflet, tails_DSPC_carbon)
#compute the tilt angles (according to the definition of https://doi.org/10.1021/ct400492e)
head_CM_DSCP, last_three_carbons_CM_DSCP, tilt_angle_DSPC, tilt_vect_DSPC = compute_tilt_angle_DSPC(DSPC_resid,head_CM_lipid4, leaflet)
""" CHL """
tail_CHL_1 = u.select_atoms('resnum ' + str(CHL_resid) + ' and name C65').positions
chain_CHL_A_1 = u.select_atoms('resnum ' + str(CHL_resid) + ' and name C24').positions
#Todo check how to calculate theta
u_CHL_1 = compute_order_parameters(head_CHL_1, tail_CHL_1, normal_leaflet[leaflet.resnames == 'CHL'])
tilt_angle_CHL = head_tail_angle(head_CHL_1, tail_CHL_1, normal_leaflet[leaflet.resnames == 'CHL'])
tilt_vect_CHL= head_CHL_1 - tail_CHL_1
CHL_C_o_G_chainA = np.average([chain_CHL_A_1, head_CHL_1, tail_CHL_1], axis = 0)
O2_z = u.select_atoms('resnum ' + str(CHL_resid) + ' and name O2').positions
C24_z = chain_CHL_A_1
O2_CM = O2_z - COM_z
C1_CM = head_CHL_1 - COM_z
C24_CM =C24_z - COM_z
dist_O2_CM = O2_CM [:,2]
dist_C24_CM = C24_CM[:,2]
dist_C1_CM = C1_CM [:,2]
""" END """
u_CHL_1 = u_CHL_1.reshape(len(u_CHL_1), 1)
u_DLIP_1 = u_DLIP_1.reshape(len(u_DLIP_1), 1)
u_DLIP_2 = u_DLIP_2.reshape(len(u_DLIP_2), 1)
u_DSPC_1 = u_DSPC_1.reshape(len(u_DSPC_1), 1)
u_DSPC_2 = u_DSPC_2.reshape(len(u_DSPC_2), 1)
u_SSM_1 = u_SSM_1.reshape(len(u_SSM_1), 1)
u_SSM_2 = u_SSM_2.reshape(len(u_SSM_2), 1)
CHL_res = lipid1.resnums
DLIP_res = lipid2.resnums
SSM_res = lipid3.resnums
DSPC_res = lipid4.resnums
res1 = CHL_res.reshape(len(CHL_res), 1)
res2 = DLIP_res.reshape(len(DLIP_res), 1)
res3 = DSPC_res.reshape(len(DSPC_res), 1)
res4 = SSM_res.reshape(len(SSM_res), 1)
u_all = np.vstack((u_CHL_1, u_DLIP_1, u_DLIP_2, u_SSM_1, u_SSM_2, u_DSPC_1, u_DSPC_2))
lpd_coords = np.vstack((CHL_C_o_G_chainA, DLIP_C_o_G_chainA, DLIP_C_o_G_chainB, SSM_C_o_G_chainA, SSM_C_o_G_chainB, DSPC_C_o_G_chainA, DSPC_C_o_G_chainB ))
lpd_res_all =np.vstack((res1, res2, res2, res3, res3, res4, res4))
u_1 = np.vstack((u_CHL_1, u_DLIP_1, u_SSM_1, u_DSPC_1))
u_2 = np.vstack((u_CHL_1, u_DLIP_2, u_SSM_2, u_DSPC_2))
return lpd_coords, lpd_res_all, u_all, res1, CHL_C_o_G_chainA, dist_O2_CM, dist_C24_CM, dist_C1_CM, res2, res3, res4, box,\
DLIP_order_param_sn2, DLIP_order_param_sn1, DLIP_C_o_G_chainA, DLIP_C_o_G_chainB,\
DSPC_order_param_sn2, DSPC_order_param_sn1, DSPC_C_o_G_chainA, DSPC_C_o_G_chainB,\
SSM_order_param_sn2, SSM_order_param_sn1, SSM_C_o_G_chainA, SSM_C_o_G_chainB,\
tilt_angle_DSPC, tilt_angle_DLIP, tilt_angle_CHL, tilt_angle_SSM, tilt_vect_DSPC, tilt_vect_DLIP, tilt_vect_CHL, tilt_vect_SSM, u_1, u_2
directory_directors = "ANALYSIS/directors/"
directory_deuterium = "ANALYSIS/deuterium/"
directory_tilts = "ANALYSIS/tilts_local_normals/"
#create the directory in the file path if it does not exist
if not os.path.exists(directory_directors):
os.makedirs(directory_directors)
if not os.path.exists(directory_deuterium):
os.makedirs(directory_deuterium)
if not os.path.exists(directory_tilts):
os.makedirs(directory_tilts)
tails_DLIP_carbon = np.arange(2, 19)
tails_DSPC_carbon = np.arange(2, 19)
tails_SSM_carbon = np.arange(2, 19)
av_C_DLIP_sn1 = []
av_C_DLIP_sn2 = []
av_C_SSM_sn1 = []
av_C_SSM_sn2 = []
av_C_DSPC_sn1 = []
av_C_DSPC_sn2 = []
#results_splay = [] #should go befor the ts loop
for ts in u.trajectory[0:u.trajectory.n_frames:1]: #every 1 ns
#lipid1 == CHOL, lipid2 == DLIPC, lipid4 == DSPC
lipid1, lipid2, lipid3, lipid4, COM_z, box,leaflet = identify_leaflets(u, ts)
lpd_resid =' '.join(str(x) for x in leaflet.resnums[:])
#headgroups= u.select_atoms('resnum ' + str(lpd_resid) + ' and ( (name P or name O2)) ')
lpd1_resid =' '.join(str(x) for x in lipid1.resnums[:])
lpd2_resid =' '.join(str(x) for x in lipid2.resnums[:])
lpd3_resid =' '.join(str(x) for x in lipid3.resnums[:])
lpd4_resid =' '.join(str(x) for x in lipid4.resnums[:])
#headgroups_for_normals CHL
head_CM_lipid1 = u.select_atoms('resnum ' + str(lpd1_resid) + ' and name C1').positions
#headgroups_for_normals DLIPC
head_1_lpd2 = u.select_atoms('resnum ' + str(lpd2_resid) + ' and name P').positions
head_2_lpd2 = u.select_atoms('resnum ' + str(lpd2_resid) + ' and name C3').positions
head_3_lpd2 = u.select_atoms('resnum ' + str(lpd2_resid) + ' and name C21').positions
head_4_lpd2 = u.select_atoms('resnum ' + str(lpd2_resid) + ' and name C2').positions
head_CM_lipid2 = np.average([head_1_lpd2, head_2_lpd2, head_3_lpd2, head_4_lpd2], axis = 0)
#headgroups_for_normals SSM
head_1_lpd3 = u.select_atoms('resnum ' + str(lpd3_resid) + ' and name P').positions
head_2_lpd3 = u.select_atoms('resnum ' + str(lpd3_resid) + ' and name C3S').positions
head_3_lpd3 = u.select_atoms('resnum ' + str(lpd3_resid) + ' and name C2S').positions
head_4_lpd3 = u.select_atoms('resnum ' + str(lpd3_resid) + ' and name C1F').positions
head_CM_lipid3 = np.average([head_1_lpd3, head_2_lpd3, head_3_lpd3, head_4_lpd3], axis = 0)
#headgroups_for_normals DSPC
head_1_lpd4 = u.select_atoms('resnum ' + str(lpd4_resid) + ' and name P').positions
head_2_lpd4 = u.select_atoms('resnum ' + str(lpd4_resid) + ' and name C3').positions
head_3_lpd4 = u.select_atoms('resnum ' + str(lpd4_resid) + ' and name C21').positions
head_4_lpd4 = u.select_atoms('resnum ' + str(lpd4_resid) + ' and name C2').positions
head_CM_lipid4 = np.average([head_1_lpd4, head_2_lpd4, head_3_lpd4, head_4_lpd4], axis = 0)
headgroups_coord = np.vstack((head_CM_lipid1, head_CM_lipid2, head_CM_lipid3, head_CM_lipid4))
normal_leaflet, indices, sigma = get_normals_CM_headgroups(headgroups_coord, box, cutoff=15)
coord, residues, directors, chl_res, chl_coord, distance_head_chl_to_center, distance_tail_chl_to_center,distance_C1_chl_to_center, DLIP_res, ssm_res, dspc_res, box,DLIP_order_param_sn2, DLIP_order_param_sn1,DLIP_C_o_G_chainA, DLIP_C_o_G_chainB,DSPC_order_param_sn2, DSPC_order_param_sn1, DSPC_C_o_G_chainA,DSPC_C_o_G_chainB, SSM_order_param_sn2, SSM_order_param_sn1, SSM_C_o_G_chainA, SSM_C_o_G_chainB, tilt_angle_DSPC,tilt_angle_DLIP, tilt_angle_CHL, tilt_angle_SSM, tilt_vect_DSPC, tilt_vect_DLIP, tilt_vect_CHL, tilt_vect_SSM, u_1, u_2 = main(leaflet, COM_z, box, lipid1, lipid2, lipid3, lipid4, lpd2_resid, lpd4_resid, lpd3_resid, lpd1_resid, head_CM_lipid1, head_CM_lipid2,head_CM_lipid3, head_CM_lipid4, tails_DLIP_carbon,tails_DSPC_carbon, tails_SSM_carbon)
DLIP_sn1= np.mean(np.vstack((DLIP_order_param_sn1)) , axis=0)
DLIP_sn2= np.mean(np.vstack((DLIP_order_param_sn2)), axis=0)
SSM_sn1= np.mean(np.vstack((SSM_order_param_sn1)) , axis=0)
SSM_sn2= np.mean(np.vstack((SSM_order_param_sn2)), axis=0)
DSPC_sn1= np.mean(np.vstack((DSPC_order_param_sn1)) , axis=0)
DSPC_sn2= np.mean(np.vstack((DSPC_order_param_sn2)), axis=0)
av_C_DLIP_sn1.append(DLIP_order_param_sn1)
av_C_DLIP_sn2.append(DLIP_order_param_sn2)
av_C_SSM_sn1.append(SSM_order_param_sn1)
av_C_SSM_sn2.append(SSM_order_param_sn2)
av_C_DSPC_sn1.append(DSPC_order_param_sn1)
av_C_DSPC_sn2.append(DSPC_order_param_sn2)
resids = np.vstack((chl_res, DLIP_res, ssm_res, dspc_res))
resnames = np.vstack(( np.array(['CHL']*len(chl_res)).reshape(len(chl_res),1), np.array(['DLIP']*len(DLIP_res)).reshape(len(DLIP_res),1), np.array(['SSM']*len(ssm_res)).reshape(len(ssm_res),1), np.array(['DSPC']*len(dspc_res)).reshape(len(dspc_res),1) ))
Scd_sn1 = np.vstack(( np.array([np.nan]*len(chl_res)).reshape(len(chl_res),1), DLIP_sn1.reshape(len(DLIP_sn1), 1), SSM_sn1.reshape(len(SSM_sn1), 1), DSPC_sn1.reshape(len(DSPC_sn1), 1) ))
Scd_sn2 = np.vstack(( np.array([np.nan]*len(chl_res)).reshape(len(chl_res),1), DLIP_sn2.reshape(len(DLIP_sn2), 1), SSM_sn2.reshape(len(SSM_sn2), 1), DSPC_sn2.reshape(len(DSPC_sn2), 1) ))
tilt_angles = np.vstack(( tilt_angle_CHL.reshape(len(tilt_angle_CHL),1), tilt_angle_DLIP.reshape(len(tilt_angle_DLIP),1), tilt_angle_SSM.reshape(len(tilt_angle_SSM),1), tilt_angle_DSPC.reshape(len(tilt_angle_DSPC),1) ))
tilt_vects = np.vstack(( tilt_vect_CHL, tilt_vect_DLIP, tilt_vect_SSM, tilt_vect_DSPC))
lpd_coords1 = np.vstack((chl_coord, DLIP_C_o_G_chainA, SSM_C_o_G_chainA, DSPC_C_o_G_chainA))
lpd_coords2 = np.vstack((chl_coord, DLIP_C_o_G_chainB, SSM_C_o_G_chainB, DSPC_C_o_G_chainB))
times = [ts.frame]*len(resids)
""" SPLAY calcultions """
""" read in the headgroups coordinates and find who is the neighbours to every single lipid """
dist_vect_arr = MDAnalysis.lib.distances.distance_array(headgroups_coord, headgroups_coord, box=box)
""" compute only the firs neighbour"""
first_neighbors= np.argsort(dist_vect_arr, axis =0)[0:2,:]
""" compute the splay angle for every lipid and its firts neighbour and store the information into an array """
angles_splay = (compute_splays(first_neighbors, ts.frame, tilt_vects)) #this is an array
df = pd.DataFrame({'Time': times, 'Resid': resids.flatten(),'Resnames': resnames.flatten(), 'Scd_sn1': Scd_sn1.flatten(),'Scd_sn2': Scd_sn2.flatten(),
'u_1': u_1.flatten() , 'u_2': u_2.flatten(), 'CM coord1 X': lpd_coords1[:,0], 'CM coord1 Y': lpd_coords1[:,1],'CM coord1 Z': lpd_coords1[:,2],
'CM coord2 X': lpd_coords2[:,0], 'CM coord2 Y': lpd_coords2[:,1],'CM coord2 Z': lpd_coords2[:,2],
'Head Coord X': headgroups_coord[:,0], 'Head Coord Y': headgroups_coord[:,1], 'Head Coord Z': headgroups_coord[:,2] ,'Tilt_angles': tilt_angles.flatten(), #,
'Tilt_vects X': tilt_vects[:,0], 'Tilt_vects Y': tilt_vects[:,1], 'Tilt_vects Z': tilt_vects[:,2], 'Splay': angles_splay[:,0] })
df.to_pickle(directory_directors + 'Dataframe'+ str(side) + str(ts.frame))
box_df = pd.DataFrame({'Time':times[0] , 'box_x': [box[0]], 'box_y': [box[1]], 'box_z': [box[2]], 'alpha' : [box[3]], 'beta' : [box[4]], 'gamma' : [box[5]]})
import json
import re
from glob import glob
from os import makedirs, path
import pandas as pd
from mne import Evoked, write_evokeds
from mne.channels.layout import _find_topomap_coords
from mne.time_frequency import AverageTFR, write_tfrs
def files_from_dir(dir_path, extensions, natsort_files=True):
"""Retrieves files matching pattern(s) from a given parent directory."""
# Find all files with one of the right extensions
assert path.isdir(dir_path), f'Didn\'t find directory `{dir_path}`!'
files = []
for extension in extensions:
files += glob(f'{dir_path}/*.{extension}')
# Sort naturally because some files might not have leading zeros
if natsort_files:
natsort = lambda s: [
int(t) if t.isdigit() else t.lower() for t in re.split(r'(\d+)', s)]
files = sorted(files, key=natsort)
return files
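# Minimal usage sketch (hypothetical directory layout, not called anywhere in this
# module): collect BrainVision headers, naturally sorted so that 'sub_2.vhdr' comes
# before 'sub_10.vhdr', and derive participant IDs from the file names.
def _example_files_from_dir():
    vhdr_files = files_from_dir('data/raw_eeg', extensions=['vhdr'])
    participant_ids = [path.basename(f).replace('.vhdr', '') for f in vhdr_files]
    return vhdr_files, participant_ids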
def convert_participant_input(input, participant_ids):
"""Converts different inputs (e.g., dict) into a per-participant list."""
# If it's a dict, convert to list
if isinstance(input, dict):
participant_dict = {id: None for id in participant_ids}
for id, values in input.items():
assert id in participant_ids, \
f'Participant ID {id} is not in vhdr_files'
participant_dict[id] = values
return participant_dict.values()
# If it's a list of list, it must have the same length as participant_ids
elif is_nested_list(input):
assert len(input) == len(participant_ids), \
'Input lists must have the same length'
return input
# Otherwise all participants get the same values
else:
return [input] * len(participant_ids)
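# Illustrative sketch (added for clarity, not called anywhere in this module): a dict
# input is expanded to one value per participant (missing IDs get None), while any
# scalar input is simply broadcast to all participants.
def _example_convert_participant_input():
    ids = ['sub-01', 'sub-02', 'sub-03']
    per_participant = convert_participant_input({'sub-02': ['Fp1']}, ids)
    broadcast = convert_participant_input('average', ids)
    return list(per_participant), list(broadcast)  # [None, ['Fp1'], None], ['average'] * 3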
def is_nested_list(input):
"""Checks if a list is nested, i.e., contains at least one other list."""
# Check if there is any list in the list
if isinstance(input, list):
return any(isinstance(elem, list) for elem in input)
else:
return False
def save_clean(raw, output_dir, participant_id=''):
"""Saves cleaned (continuous) EEG data in `.fif` format."""
# Re-format participant ID for filename
participant_id_ = '' if participant_id == '' else f'{participant_id}_'
suffix = 'cleaned_eeg'
# Create output folder and save
makedirs(output_dir, exist_ok=True)
fname = f'{output_dir}/{participant_id_}{suffix}.fif'
raw.save(fname)
def save_df(df, output_dir, participant_id='', suffix=''):
"""Saves pd.DataFrame in `.csv` format."""
# Create output folder
makedirs(output_dir, exist_ok=True)
# Re-format participant ID and suffix for filename
participant_id_ = '' if participant_id == '' else f'{participant_id}_'
suffix = '' if suffix == '' else suffix
# Save DataFrame
fname = f'{output_dir}/{participant_id_}{suffix}.csv'
df.to_csv(
fname, na_rep='NA', float_format='%.4f', index=False)
def save_epochs(epochs, output_dir, participant_id='', to_df=True):
"""Saves mne.Epochs with metadata in `.fif` and/or `.csv` format."""
# Create output folder
makedirs(output_dir, exist_ok=True)
# Re-format participant ID for filename
participant_id_ = '' if participant_id == '' else f'{participant_id}_'
suffix = 'epo'
# Convert to DataFrame
if to_df is True or to_df == 'both':
scalings = {'eeg': 1e6, 'misc': 1e6}
epochs_df = epochs.to_data_frame(scalings=scalings, time_format=None)
# Add metadata from log file
metadata_df = epochs.metadata.copy()
metadata_df = metadata_df.drop([col for col in metadata_df.columns
if col in epochs_df.columns], axis=1)
n_samples = len(epochs.times)
metadata_df = metadata_df.loc[metadata_df.index.repeat(n_samples)]
metadata_df = metadata_df.reset_index(drop=True)
epochs_df = pd.concat([metadata_df, epochs_df], axis=1)
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import requests
import io
import zipfile
from kungfu.series import FinancialSeries
from kungfu.frame import FinancialDataFrame
def download_factor_data(freq='D'):
'''
Downloads factor data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
'''
if freq == 'D':
# Download Carhartt 4 Factors
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
mom = web.DataReader('F-F_Momentum_Factor_daily', 'famafrench', start='1/1/1900')[0]
factors_daily = factors_daily.join(mom)
factors_daily = factors_daily[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_daily.columns = ['Mkt-RF','SMB','HML','Mom','RF']
return FinancialDataFrame(factors_daily)
elif freq == 'M':
# Download Carhartt 4 Factors
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
# mom = web.DataReader('F-F_Momentum_Factor', 'famafrench', start='1/1/1900')[0] #There seems to be a problem with the data file, fix if mom is needed
# factors_monthly = factors_monthly.join(mom)
# factors_monthly = factors_monthly[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_monthly.index = factors_monthly.index.to_timestamp()
# factors_monthly.columns = ['Mkt-RF','SMB','HML','Mom','RF']
factors_monthly.columns = ['Mkt-RF','SMB','HML','RF']
factors_monthly.index = factors_monthly.index+pd.tseries.offsets.MonthEnd(0)
return FinancialDataFrame(factors_monthly)
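# Minimal usage sketch (assumes an internet connection to Ken French's data library;
# defined here only for illustration and never executed at import time): fetch the
# monthly factors and look at the average market excess return.
def _example_download_factor_data():
    factors_m = download_factor_data(freq='M')
    return factors_m['Mkt-RF'].mean()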
def download_industry_data(freq='D', excessreturns = True):
'''
Downloads industry data from <NAME>'s website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
if freq == 'D':
# Download Fama/French 49 Industries
industries_daily = web.DataReader("49_Industry_Portfolios_Daily", "famafrench", start='1/1/1900')[0]
industries_daily[(industries_daily <= -99.99) | (industries_daily == -999)] = np.nan #set missing data to NaN
industries_daily = industries_daily.rename_axis('Industry', axis='columns')
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
industries_daily = industries_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return industries_daily
elif freq == 'M':
# Download Fama/French 49 Industries
industries_monthly = web.DataReader("49_Industry_Portfolios", "famafrench", start='1/1/1900')[0]
industries_monthly[(industries_monthly <= -99.99) | (industries_monthly == -999)] = np.nan #set missing data to NaN
industries_monthly = industries_monthly.rename_axis('Industry', axis='columns')
industries_monthly.index = industries_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
industries_monthly = industries_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
industries_monthly.index = industries_monthly.index+pd.tseries.offsets.MonthEnd(0)
return industries_monthly
def download_25portfolios_data(freq='D', excessreturns = True):
'''
Downloads 25 portfolios data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
if freq == 'D':
# Download Fama/French 25 portfolios
portfolios_daily = web.DataReader("25_Portfolios_5x5_Daily_CSV", "famafrench", start='1/1/1900')[0]
portfolios_daily[(portfolios_daily <= -99.99) | (portfolios_daily == -999)] = np.nan #set missing data to NaN
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
portfolios_daily = portfolios_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return portfolios_daily
elif freq == 'M':
# Download Fama/French 25 portfolios
portfolios_monthly = web.DataReader("25_Portfolios_5x5_CSV", "famafrench", start='1/1/1900')[0]
portfolios_monthly[(portfolios_monthly <= -99.99) | (portfolios_monthly == -999)] = np.nan #set missing data to NaN
portfolios_monthly.index = portfolios_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
portfolios_monthly = portfolios_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
return portfolios_monthly
def download_recessions_data(freq='M', startdate='1/1/1900', enddate=dt.datetime.today()):
'''
Downloads NBER recessions from FRED and returns series.
freq can be either 'D' (daily) or 'M' (monthly).
startdate and enddate define the length of the timeseries.
'''
USREC_monthly = web.DataReader('USREC', 'fred',start = startdate, end=enddate)
if freq == 'M':
return USREC_monthly
if freq == 'D':
first_day = USREC_monthly.index.min() - pd.DateOffset(day=1)
last_day = USREC_monthly.index.max() + pd.DateOffset(day=31)
dayindex = pd.date_range(first_day, last_day, freq='D')
dayindex.name = 'DATE'
USREC_daily = USREC_monthly.reindex(dayindex, method='ffill')
return USREC_daily
def download_jpy_usd_data():
'''
Downloads USD/JPY exchange rate data from FRED and returns series.
'''
jpy = web.DataReader('DEXJPUS', 'fred', start = '1900-01-01')
return jpy
def download_cad_usd_data():
'''
Downloads USD/CAD exchange rate data from FRED and returns series.
'''
cad = web.DataReader('DEXCAUS', 'fred', start = '1900-01-01')
return cad
def download_vix_data():
'''
Downloads VIX index data from FRED and returns series.
'''
vix = web.DataReader('VIXCLS', 'fred', start = '1900-01-01')
return vix
def download_goyal_welch_svar():
'''
Downloads Goyal/Welch SVAR data from Amit Goyal's website and returns DataFrame.
'''
url = 'http://www.hec.unil.ch/agoyal/docs/PredictorData2017.xlsx'
sheet = pd.read_excel(url, sheet_name='Monthly')
dates = sheet['yyyymm']
SVAR = pd.DataFrame(sheet['svar'])
SVAR.index = [(dt.datetime(year=date // 100, month=date % 100, day=1) + dt.timedelta(days=32)).replace(day=1) - dt.timedelta(days=1) for date in dates]
return SVAR
def download_sadka_liquidity():
'''
Downloads Sadka liquidity factor data from <NAME>'s website and returns DataFrame.
'''
url = 'http://www2.bc.edu/ronnie-sadka/Sadka-LIQ-factors-1983-2012-WRDS.xlsx'
sheet = pd.read_excel(url, sheet_name='Sheet1')
"""
Show completion status for a given set of experiments.
"""
import os
import sys
from datetime import datetime
import numpy as np
import pandas as pd
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
import util
from config import status_args
def get_experiment_hash(args):
"""
Return the experiment hash for the given args.
"""
# single test
if args.exp == 'influence':
exp_dict = {'n_test': args.n_test}
elif args.exp == 'influenceLE':
exp_dict = {'n_test': args.n_test}
elif args.exp == 'remove':
exp_dict = {'n_test': args.n_test, 'remove_frac': args.remove_frac}
elif args.exp == 'label':
exp_dict = {'n_test': args.n_test, 'edit_frac': args.edit_frac}
elif args.exp == 'poison':
exp_dict = {'n_test': args.n_test, 'poison_frac': args.poison_frac}
elif args.exp == 'counterfactual':
exp_dict = {'n_test': args.n_test, 'step_size': args.step_size}
elif args.exp == 'targeted_edit':
exp_dict = {'n_test': args.n_test, 'edit_frac': args.targeted_edit_frac}
# multi test
elif args.exp == 'influence_set':
exp_dict = {'val_frac': args.val_frac}
elif args.exp == 'remove_set':
exp_dict = {'remove_frac': args.remove_frac_set, 'val_frac': args.val_frac}
elif args.exp == 'label_set':
exp_dict = {'edit_frac': args.edit_frac_set, 'val_frac': args.val_frac}
elif args.exp == 'poison_set':
exp_dict = {'poison_frac': args.poison_frac_set, 'val_frac': args.val_frac}
elif args.exp == 'noise_set':
exp_dict = {'noise_frac': args.noise_frac, 'check_frac': args.check_frac,
'val_frac': args.val_frac}
exp_hash = util.dict_to_hash(exp_dict)
return exp_hash
def get_method_hash(args, method):
"""
Get method hash for the given args for the specified method.
"""
_, method_hash = util.explainer_params_to_dict(method, vars(args))
return method_hash
def get_noise_set_status(args, logger, out_dir, exp_hash):
"""
Construct pd.DataFrame with completion status of each experiment.
Note
- Custom method for the "Noise" experiment.
"""
results = []
for dataset in args.dataset_list:
logger.info(f'{dataset}')
for tree_type in args.tree_type:
logger.info(f'\t{tree_type}')
method_results = {'dataset': dataset, 'tree_type': tree_type}
for agg_type in args.agg_type:
for method in args.method_list:
method_hash = get_method_hash(args, method)
result_dir = os.path.join(args.in_dir,
dataset,
tree_type,
f'exp_{exp_hash}',
agg_type,
f'{method}_{method_hash}')
result_fp = os.path.join(result_dir, 'results.npy')
if os.path.exists(result_fp):
if args.status_type == 'time':
result = np.load(result_fp, allow_pickle=True)[()]
assert 'total_time' in result
method_results[f'{method}_{agg_type}'] = result['total_time']
elif args.status_type == 'completion':
method_results[f'{method}_{agg_type}'] = 1
results.append(method_results)
df = pd.DataFrame(results).sort_values(['tree_type', 'dataset'])
logger.info(f'\nCompleted status:\n{df}')
df.to_csv(os.path.join(out_dir, f'{args.status_type}.csv'), index=None)
def get_result_status(args, logger, out_dir, exp_hash):
"""
Construct pd.DataFrame with completion status of each experiment.
"""
results = []
for dataset in args.dataset_list:
logger.info(f'{dataset}')
for tree_type in args.tree_type:
logger.info(f'\t{tree_type}')
method_results = {'dataset': dataset, 'tree_type': tree_type}
for method in args.method_list:
method_hash = get_method_hash(args, method)
result_dir = os.path.join(args.in_dir,
dataset,
tree_type,
f'exp_{exp_hash}',
f'{method}_{method_hash}')
result_fp = os.path.join(result_dir, 'results.npy')
if os.path.exists(result_fp):
if args.status_type == 'time':
result = np.load(result_fp, allow_pickle=True)[()]
assert 'total_time' in result
method_results[method] = result['total_time']
elif args.status_type == 'completion':
method_results[method] = 1
else:
method_results[method] = np.nan
results.append(method_results)
df = pd.DataFrame(results)
#!/usr/bin/env python3
'''
Splits dataset into train/test/val
Author: <NAME>
Date: 10/16/2019
'''
import os
import argparse
import pandas as pd
import numpy as np
import csv
import shutil
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import train_test_split
try:
import utilities as utilities
except ImportError:
import utility.utilities as utilities
# For nepsa_all
# nepsa_all = ['B-DATE','I-DATE', 'B-EVENT','I-EVENT', 'B-NUM','I-NUM', 'B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE']
nepsa_all = ['B-PER','I-PER',
'B-ORG','I-ORG',
'B-LOC','I-LOC',
'B-MISC','I-MISC',
'B-FEEDBACK','I-FEEDBACK',
'B-GENERAL','I-GENERAL',
'B-PROFANITY','I-PROFANITY',
'B-VIOLENCE','I-VIOLENCE']
# For nepsa_target
# nepsa_target= ['B-FEEDBACK','I-FEEDBACK', 'B-DATE','I-DATE', 'B-EVENT','I-EVENT', 'B-NUM','I-NUM', 'B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE', 'B-GENERAL','I-GENERAL', 'B-PROFANITY','I-PROFANITY', 'B-VIOLENCE','I-VIOLENCE']
nepsa_target= ['B-PER','I-PER',
'B-ORG','I-ORG',
'B-LOC','I-LOC',
'B-MISC','I-MISC']
# For nepsa_aspect
# nepsa_aspect = ['B-DATE','I-DATE', 'B-EVENT','I-EVENT', 'B-NUM','I-NUM', 'B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE', 'B-PER','I-PER', 'B-ORG','I-ORG', 'B-LOC','I-LOC', 'B-MISC','I-MISC']
nepsa_aspect = ['B-FEEDBACK','I-FEEDBACK',
'B-GENERAL','I-GENERAL',
'B-PROFANITY','I-PROFANITY',
'B-VIOLENCE','I-VIOLENCE']
forbid = {
'nepsa_all' : nepsa_all,
'nepsa_target' : nepsa_target,
'nepsa_aspect' : nepsa_aspect
}
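# Illustrative sketch (added for clarity, not called anywhere): write_df() below keeps
# only the tag classes listed in forbid[split_type] and maps every other tag to 'O',
# e.g. for the 'nepsa_target' split the aspect-category tags are collapsed.
def _example_tag_masking():
    tag = ['B-PER', 'I-PER', 'O', 'B-GENERAL']
    masked = ['O' if x not in forbid['nepsa_target'] else x for x in tag]
    return masked  # ['B-PER', 'I-PER', 'O', 'O']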
def text_tag_convert(input_file, logger, seq_len, verbose=False):
dir_name = os.path.dirname(input_file)
output_dir = os.path.join(dir_name, 'text_tag_only')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
sent_file = os.path.join(output_dir, 'text_only.txt')
tag_file = os.path.join(output_dir, 'tag_only.txt')
MIN_SEQ_LENGTH = seq_len[0]
MAX_SEQ_LENGTH = seq_len[1]
with open(input_file,'r', encoding='utf-8') as in_file, open(sent_file,'w', encoding='utf-8') as txt_f, open(tag_file,'w', encoding='utf-8') as tag_f:
sentence = []
tag = []
max_length=0
max_sentence=''
max_counter=0
min_counter=0
sent_counter=0
line_num=0
j=0
for i,row in enumerate(in_file):
#To know which line is defunct in file
#print(i+1)
row = row.strip().split()
# Assuming input file has four columns
# token, start_position, end_position, entity_type
# Changed to greater than 1 to
# fit old nepali-ner data as well
if len(row)>1:
sentence.append(row[0])
tag.append(row[-1])
else:
line_num+=1
if len(sentence) > max_length:
max_length = len(sentence)
max_sentence=sentence
j=line_num
if len(sentence) < MAX_SEQ_LENGTH and len(sentence) > MIN_SEQ_LENGTH:
txt_f.write(' '.join(sentence)+'\n')
tag_f.write(' '.join(tag)+'\n')
sent_counter+=1
else:
if len(sentence) > MAX_SEQ_LENGTH:
max_counter+=1
if verbose:
logger.info("Length of longer sentence = {}".format(len(sentence)))
else:
min_counter+=1
if verbose:
logger.info("Length of shorter sentence = {}".format(len(sentence)))
sentence = []
tag = []
logger.info("Max sentence length limit = {}".format(MAX_SEQ_LENGTH))
logger.info("Min sentence length limit = {}".format(MIN_SEQ_LENGTH))
logger.info("Longest sentence length = {}".format(max_length))
logger.info("Longest sentence at line number = {}".format(j))
logger.info("Longest sentence counter = {}".format(max_counter))
logger.info("Shortest sentence counter = {}".format(min_counter))
logger.info("% of sentence removed = {}%".format(max_counter+min_counter/line_num * 100))
logger.info("Total number of sentence before removal= {}".format(line_num))
logger.info("Total number of sentence after removal= {}".format(sent_counter))
in_file.close()
txt_f.close()
tag_f.close()
logger.info("Text and Tag files are stored in {}".format(output_dir))
logger.info("******************************************************")
return sent_file, tag_file
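# Minimal usage sketch (hypothetical paths and logger; the script's real entry point is
# not shown in this excerpt): convert a token-per-line annotation file, then build the
# train/dev/test splits for one of the tag schemes defined above.
def _example_split_pipeline(logger):
    sent_file, tag_file = text_tag_convert('data/annotations.txt', logger, seq_len=(1, 300))
    os.makedirs('data/nepsa_target', exist_ok=True)
    split_train_test(os.path.dirname(sent_file), 'data/nepsa_target', logger, 'nepsa_target')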
'''
Function to write dataframe into files
'''
def write_df(df, fname, logger, split_type):
invalid_counter = 0
with open(fname, 'w', encoding='utf-8') as f:
for i, r in df.iterrows():
# Splits the TEXT and TAG into chunks
text = r['TEXT'].split()
tag = r['TAG'].split()
tag = ['O' if x not in forbid[split_type] else x for x in tag]
# Remove specific lines having these categories
# if not set(tag).intersection(set(['B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE'])):
# Remove if it contains only 'O'
if list(set(tag)) != ['O']:
for t1, t2 in zip(text, tag):
f.write(t1+' '+t2+'\n')
f.write('\n')
else:
invalid_counter+=1
logger.info('Number of sentences containing only \'O\': {}'.format(invalid_counter))
logger.info('Created: {}'.format(fname))
f.close()
return invalid_counter
'''
Partitions the given data into chunks
Create train/test file accordingly
'''
def split_train_test(source_path, save_path, logger, split_type):
sent_file = os.path.join(source_path, 'text_only.txt')
tag_file = os.path.join(source_path, 'tag_only.txt')
logger.info("Saving path: {}".format(save_path))
# if not os.path.exists(save_path):
# os.mkdir(save_path)
train_fname = os.path.join(save_path,'train.txt')
test_fname = os.path.join(save_path, 'test.txt')
val_fname = os.path.join(save_path, 'dev.txt')
df_txt = pd.read_csv(sent_file, delimiter='\n', encoding='utf-8',
skip_blank_lines=True, header=None,
quoting=csv.QUOTE_NONE, names=['TEXT'])
df_tag = pd.read_csv(tag_file, delimiter='\n', encoding='utf-8',
skip_blank_lines=True, header=None,
quoting=csv.QUOTE_NONE, names=['TAG'])
df = df_txt.join(df_tag).sample(frac=1).reset_index(drop=True)
# To split into train and intermediate 80/20
mask = np.random.rand(len(df)) < 0.8
train_df = df[mask]
intermediate_df = df[~mask]
# To split intermediate into 10/10 into test and dev
val_mask = np.random.rand(len(intermediate_df)) < 0.5
test_df = intermediate_df[val_mask]
val_df = intermediate_df[~val_mask]
# Write those train/test dataframes into files
invalid_train_count = write_df(train_df, train_fname, logger, split_type)
invalid_test_count = write_df(test_df, test_fname, logger, split_type)
invalid_val_count = write_df(val_df, val_fname, logger, split_type)
total_invalid = invalid_train_count + invalid_test_count + invalid_val_count
total_data_length = len(train_df) + len(test_df) + len(val_df)
# Print stat
logger.info("Length of train dataset: {}".format(len(train_df) - invalid_train_count))
logger.info("Length of test dataset: {}".format(len(test_df) - invalid_test_count))
logger.info("Length of val dataset: {}".format(len(val_df) - invalid_val_count))
logger.info("Total dataset reduced by: {:.3f}%".format((total_invalid / total_data_length) * 100))
'''
Partitions the given data into chunks
Create train/test file accordingly
***Obsolete, kept for reference***
'''
def split_train_test_csv(source_path, save_path, logger):
logger.info("Saving path: {}".format(save_path))
train_fname = os.path.join(save_path,'train.txt')
test_fname = os.path.join(save_path, 'test.txt')
val_fname = os.path.join(save_path, 'dev.txt')
df_txt = pd.read_csv(source_path, delimiter=',', encoding='utf-8',
skip_blank_lines=True, header=None,
quoting=csv.QUOTE_MINIMAL, names=['ss', 'ac', 'at', 'text'])
df = df_txt.sample(frac=1).reset_index(drop=True)
# To split into train and intermediate 80/20
mask = np.random.rand(len(df)) < 0.8
train_df = df[mask]
intermediate_df = df[~mask]
# To split intermediate into 10/10 into test and dev
val_mask = np.random.rand(len(intermediate_df)) < 0.5
test_df = intermediate_df[val_mask]
val_df = intermediate_df[~val_mask]
train_df.to_csv(train_fname, header=False, index=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ", encoding='utf-8')
test_df.to_csv(test_fname, header=False, index=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ", encoding='utf-8')
val_df.to_csv(val_fname, header=False, index=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ", encoding='utf-8')
# Print stat
logger.info("Length of train dataset: {}".format(len(train_df)))
logger.info("Length of test dataset: {}".format(len(test_df)))
logger.info("Length of val dataset: {}".format(len(val_df)))
def write_csv(df, fname):
df.to_csv(fname, header=False, index=False,
quoting=csv.QUOTE_MINIMAL,
escapechar=" ",
encoding='utf-8')
'''
Partitions the given data using GroupShuffleSplit
This function will split train/test/val for each
aspect category equally
Splits 80/10/10 within each category
** Not based on the whole document
'''
def split_csv(source_path, save_path, logger):
logger.info("Saving path: {}".format(save_path))
train_fname = os.path.join(save_path,'train.txt')
test_fname = os.path.join(save_path, 'test.txt')
val_fname = os.path.join(save_path, 'dev.txt')
df_txt = pd.read_csv(source_path, delimiter=',',
encoding='utf-8',
skip_blank_lines=True,
header=None,
names=['ss', 'ac', 'at', 'text'])
# Split the df based on sentiment strength
# into positive and negative
gss = GroupShuffleSplit(test_size=.20, n_splits=1, random_state = 163).split(df_txt, groups=df_txt['ss'])
# Get positive and negative dataframe
for positive_df, negative_df in gss:
# Get data based on the index
negative = df_txt.iloc[negative_df]
positive = df_txt.iloc[positive_df]
# Split 80/10/10 -> train, test, val
# based on sentiment strength
train_neg, test_val_neg = train_test_split(negative, test_size=0.2)
train_pos, test_val_pos = train_test_split(positive, test_size=0.2)
test_neg, val_neg = train_test_split(test_val_neg, test_size=0.5)
test_pos, val_pos = train_test_split(test_val_pos, test_size=0.5)
# Concat negative and positive dataframe and shuffle
train_df = pd.concat([train_pos, train_neg], ignore_index=True)
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE, SQLITE_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
class Pheno2SQLTest(DBTest):
@unittest.skip('sqlite being removed')
def test_sqlite_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check table exists
tmp = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not tmp.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_exit(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
temp_dir = tempfile.mkdtemp()
# Run
with Pheno2SQL(csv_file, db_engine, tmpdir=temp_dir) as p2sql:
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files were deleted
assert len(os.listdir(temp_dir)) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_custom_tmpdir(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
with Pheno2SQL(csv_file, db_engine, tmpdir='/tmp/custom/directory/here', delete_temp_csv=False) as p2sql:
# Run
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files are still there
assert len(os.listdir('/tmp/custom/directory/here')) > 0
## Check that temporary directory is now clean
assert len(os.listdir('/tmp/custom/directory/here')) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_auxiliary_table_is_created(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existence
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('fields'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_is_created_and_has_minimum_data_required(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_with_more_information(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'field_id'] == '21'
assert tmp.loc['c21_0_0', 'inst'] == 0
assert tmp.loc['c21_0_0', 'arr'] == 0
assert tmp.loc['c21_0_0', 'coding'] == 100261
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_0_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_0_0', 'description'] == 'An string value'
assert tmp.loc['c21_1_0', 'field_id'] == '21'
assert tmp.loc['c21_1_0', 'inst'] == 1
assert tmp.loc['c21_1_0', 'arr'] == 0
assert tmp.loc['c21_1_0', 'coding'] == 100261
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_1_0', 'description'] == 'An string value'
assert tmp.loc['c21_2_0', 'field_id'] == '21'
assert tmp.loc['c21_2_0', 'inst'] == 2
assert tmp.loc['c21_2_0', 'arr'] == 0
assert tmp.loc['c21_2_0', 'coding'] == 100261
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_2_0', 'description'] == 'An string value'
assert tmp.loc['c31_0_0', 'field_id'] == '31'
assert tmp.loc['c31_0_0', 'inst'] == 0
assert tmp.loc['c31_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c31_0_0', 'coding'])
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c31_0_0', 'type'] == 'Date'
assert tmp.loc['c31_0_0', 'description'] == 'A date'
assert tmp.loc['c34_0_0', 'field_id'] == '34'
assert tmp.loc['c34_0_0', 'inst'] == 0
assert tmp.loc['c34_0_0', 'arr'] == 0
assert tmp.loc['c34_0_0', 'coding'] == 9
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'type'] == 'Integer'
assert tmp.loc['c34_0_0', 'description'] == 'Some integer'
assert tmp.loc['c46_0_0', 'field_id'] == '46'
assert tmp.loc['c46_0_0', 'inst'] == 0
assert tmp.loc['c46_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c46_0_0', 'coding'])
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'type'] == 'Integer'
assert tmp.loc['c46_0_0', 'description'] == 'Some another integer'
assert tmp.loc['c47_0_0', 'field_id'] == '47'
assert tmp.loc['c47_0_0', 'inst'] == 0
assert tmp.loc['c47_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c47_0_0', 'coding'])
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c47_0_0', 'type'] == 'Continuous'
assert tmp.loc['c47_0_0', 'description'] == 'Some continuous value'
assert tmp.loc['c48_0_0', 'field_id'] == '48'
assert tmp.loc['c48_0_0', 'inst'] == 0
assert tmp.loc['c48_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c48_0_0', 'coding'])
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'type'] == 'Time'
assert tmp.loc['c48_0_0', 'description'] == 'Some time'
def test_postgresql_auxiliary_table_check_types(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
sql_types = """
select column_name, data_type
from information_schema.columns
where table_name = 'fields';
"""
tmp = pd.read_sql(sql_types, create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['field_id', 'data_type'] == 'text'
assert tmp.loc['inst', 'data_type'] == 'bigint'
assert tmp.loc['arr', 'data_type'] == 'bigint'
assert tmp.loc['coding', 'data_type'] == 'bigint'
assert tmp.loc['table_name', 'data_type'] == 'text'
assert tmp.loc['type', 'data_type'] == 'text'
assert tmp.loc['description', 'data_type'] == 'text'
def test_postgresql_auxiliary_table_constraints(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('fields', column_query='column_name', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
# index on 'event' column
constraint_sql = self._get_table_contrains('fields', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 6
assert 'arr' in columns
assert 'field_id' in columns
assert 'inst' in columns
assert 'table_name' in columns
assert 'type' in columns
assert 'coding' in columns
def test_postgresql_two_csv_files(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_1_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine))
expected_columns = ["eid","c100_0_0", "c100_1_0", "c100_2_0", "c110_0_0", "c120_0_0", "c130_0_0", "c140_0_0", "c150_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 5
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert int(tmp.loc[1, 'c34_0_0']) == -33
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 41.55312
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert tmp.loc[5, 'c21_0_0'] == 'Option number 5'
assert tmp.loc[5, 'c21_1_0'] == 'Maybe'
assert tmp.loc[5, 'c21_2_0'] == 'Probably'
assert pd.isnull(tmp.loc[5, 'c31_0_0'])
assert int(tmp.loc[5, 'c34_0_0']) == -4
assert int(tmp.loc[5, 'c46_0_0']) == 1
assert pd.isnull(tmp.loc[5, 'c47_0_0'])
assert tmp.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 3
assert int(tmp.loc[1, 'c100_0_0']) == -9
assert int(tmp.loc[1, 'c100_1_0']) == 3
assert pd.isnull(tmp.loc[1, 'c100_2_0'])
assert tmp.loc[1, 'c110_0_0'].round(5) == 42.55312
assert int(tmp.loc[1, 'c120_0_0']) == -33
assert tmp.loc[1, 'c130_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c140_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert tmp.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert pd.isnull(tmp.loc[3, 'c100_0_0'])
assert int(tmp.loc[3, 'c100_1_0']) == -4
assert int(tmp.loc[3, 'c100_2_0']) == -10
assert tmp.loc[3, 'c110_0_0'].round(5) == -35.31471
assert int(tmp.loc[3, 'c120_0_0']) == 0
assert tmp.loc[3, 'c130_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c140_0_0'].strftime('%Y-%m-%d') == '1997-04-15'
assert pd.isnull(tmp.loc[3, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_single_table(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2020-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '1990-02-15'
assert query_result.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_tables(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_multiple_tables(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_multiple_tables(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
def test_postgresql_two_csv_files_flipped_query_multiple_tables(self):
# Prepare
# In this test the files are just flipped
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv02, csv01), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert | pd.isnull(query_result.loc[3, 'c150_0_0']) | pandas.isnull |
from __future__ import print_function
from __future__ import unicode_literals

import os
import time
import multiprocessing as mp
from multiprocessing import Pool

import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Process
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
def split_data(data_clean, number_split = 8):
l_data_clean = np.array_split(data_clean, number_split)
return l_data_clean
def split_product(product_name):
type_product = product_name.split("/")[0]
return type_product
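# NOTE: multithread_clean() reads the module-level globals `l_data_clean` (the list of
# DataFrame chunks returned by split_data) and `type_data` (the dataset name, e.g.
# 'laptop' or 'nhacua'); both must be defined before the worker processes start.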
def multithread_clean(index_data):
data_clean = l_data_clean[index_data]
log_group_product = []
if type_data == 'laptop':
log_group_product = ['Laptop - Máy Vi Tính - Linh kiện']
elif type_data == 'nhacua':
tmp_l = []
for index, row in data_clean.iterrows():
type_product = split_product(row["productset_group_name"])
row["productset_group_name"] = type_product
tmp_l.append(row)
data_clean = | pd.DataFrame(tmp_l) | pandas.DataFrame |
# -----------------------------------------------------------------------------
# Account statement helpers
import logging
import datetime
import camelot
import pandas as pd
import os
import json
from pdfquery.cache import FileCache
import pdfquery
import pdfminer
from .utils import *
logger = logging.getLogger("hsbcpdf.helpers.accountstatements")
class EnumSumAccountTypes:
HKDSAVINGS = 'HKD Savings'
HKDCURRENT = 'HKD Current'
FCYSAVINGS = 'FCY Savings'
FCYCURRENT = 'FCY Current'
class AccountTypes:
HKDSAVINGS = 'HKDSavings'
HKDCURRENT = 'HKDCurrent'
FCYSAVINGS = 'FCYSavings'
FCYCURRENT = 'FCYCurrent'
class TableZone:
__doc__ = "Find table zone and column positions"
class Chunk:
def __init__(self, page, yup, ybot):
self.page = page
self.yup = yup
self.ybot = ybot
def __init__(self, page_height, page_width, section, account, st_date):
self.page_height = page_height
self.page_width = page_width
self.account = account
self.st_date = st_date
self.chunks = []
self.columns = None
self.table = None
self.statement = {'previous_balance': {self.account: {}}, 'new_balance': {self.account: {}}, 'entries': []}
begin_page = section.page
begin_yup = section.ybot
end_page = section.next.page
end_ybot = section.next.yup
top_margin = 690
bottom_margin = 69
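# NOTE: hard-coded layout constants (PDF points) for this statement format:
# bottom_margin bounds the table area on full pages, top_margin decides whether
# the last page still contains part of the table.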
if begin_page == end_page:
self.chunks.append(TableZone.Chunk(begin_page, begin_yup, end_ybot))
else:
self.chunks.append(TableZone.Chunk(begin_page, begin_yup, bottom_margin))
for i in range(end_page - begin_page -1):
self.chunks.append(TableZone.Chunk(begin_page + i, self.page_height, bottom_margin))
if end_ybot < top_margin:
self.chunks.append(TableZone.Chunk(end_page, self.page_height, end_ybot))
logger.debug("Section of account '{}' has {} chuncks".format(account, len(self.chunks)))
def get_tables_format(self, pdf):
logger.debug("search table hearder for account '{}'".format(self.account))
for c in self.chunks:
# seek table header
# first get large light grey horizontal line
hl = pdf.pq(
f'LTPage[page_index="{c.page-1}"] LTLine[height="0.0"]:in_bbox("0, {c.ybot}, {self.page_width}, {c.yup}")'
).filter(lambda i: float(this.get('linewidth', 0)) > 10)
if len(hl) == 0:
raise TemplateException("could not find Horizontal line of table header (page {} in bbox 0,{}, {}, {})".format(c.page,c.ybot, self.page_width, c.yup))
hl = hl[0].layout
linewidth = hl.linewidth
upper = hl.y0 + (linewidth/2) + 1
lower = hl.y0 - (linewidth/2) - 1
c.yup = upper
# then search separator vertical lines in header
logger.debug("search table hearder columns for account '{}' in page[{}] bbob[0, {}, {}, {}]".format(self.account, c.page, lower, self.page_width, upper))
if self.columns is None:
# do it once as table format is same in each chunks
self.columns = []
vls = pdf.pq(
f'LTPage[page_index="{c.page-1}"] LTLine[width="0.0"]:in_bbox("0, {lower}, {self.page_width}, {upper}")'
).filter(lambda i: float(this.get('linewidth', 0)) < 1)
if len(vls) == 0:
raise TemplateException("could not find Vertical lines of table header (page {} in bbox 0,{}, {}, {})".format(c.page,c.ybot, self.page_width, c.yup))
for vl in vls:
self.columns.append(vl.layout.x0)
self.columns.sort()
logger.debug("found these ({}) columns from hearder {}".format(len(self.columns), self.columns))
def extract_tables(self, pdfpath):
cols = ','.join(map(str, self.columns))
for c in self.chunks:
logger.debug("process table in page[{}] bbox[0,{},{},{}] with columns[{}]".format(c.page, c.ybot, self.page_width, c.yup, cols))
tables = camelot.read_pdf(
pdfpath,
pages=str(c.page),
flavor="stream",
table_areas=[f'0, {c.yup}, {self.page_width}, {c.ybot}'],
columns=[cols],
split_text=True)
logger.debug('found tables: {} - {}'.format(tables[0].parsing_report, tables[0].shape))
if self.table is None:
self.table = tables[0].df[1:]
else:
self.table = pd.concat([self.table, tables[0].df[1:]])
logger.debug("the table:\n{}".format(self.table.head().to_string()))
#camelot.plot(tables[0], kind='grid')
#plt.show()
self.clean_table()
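# clean_table() is a hook: the base class leaves it empty and each account-specific
# subclass (TableZoneHkd, TableZoneFcy, TableZoneSum) overrides it to turn the raw
# camelot table into statement entries and balances.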
def clean_table(self):
pass
def extract_date(self, strdt):
res = datetime.datetime.strptime(strdt + ' ' + str(self.st_date.year), '%d %b %Y')
if res > self.st_date:
res = res.replace(year=self.st_date.year - 1)
return res
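# Statement rows carry only day and month; extract_date first assumes the statement
# year and rolls back one year when the parsed date would fall after the statement
# date (i.e. the transaction happened late in the previous year).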
def check_consistency(self, summary):
new_balances = self.statement['new_balance'][self.account]
expected_balances = summary['new_acc_balances'][self.account]
for k,v in new_balances.items():
if k not in expected_balances.keys():
if round(v, 2) != 0. :
raise ConsistencyException(
"Missing non null balance in Summary for [{}({})] {}".format(
self.account,
k,
v
)
)
elif round(v, 2) != round(expected_balances[k]['ccy'], 2):
raise ConsistencyException(
"Mismatching balance on [{}({})] {}/{}".format(
self.account,
k,
v,
round(expected_balances[k]['ccy'], 2)
)
)
class TableZoneHkd(TableZone):
def clean_table(self):
shape = self.table.shape
logger.debug(shape)
# get first line as the previous balance
startidx=1
previous_balance_tag = self.table.iloc[0,1]
val = self.table.iloc[0, 4]
logger.debug("value to use as a float: [{}]".format(val))
previous_balance = float(val.replace(",", "")) if isinstance(val, str) else val
if self.table.iloc[0, 5] == 'DR':
previous_balance = -previous_balance
if previous_balance_tag != "B/F BALANCE":
# if first line is not "B/F BALANCE" likely this is the first statement or previous balance was 0
previous_balance = 0
startidx = 0
self.statement['previous_balance'][self.account]['HKD'] = previous_balance
dt = ""
desc = ""
new_balance = previous_balance
for index, row in self.table.iloc[startidx:, :].iterrows():
if row[0] != "": dt = self.extract_date(row[0])
desc = (desc + " " if desc != "" else "") + row[1]
credit = row[2]
debit = row[3]
amount = None
logger.debug("date[{}] desc[{}] credit[{}] debit[{}]".format(dt, desc, credit, debit))
if credit is not None and credit != "":
amount = float(credit.replace(",", ""))
elif debit is not None and debit != "":
amount = -float(debit.replace(",", ""))
else:
continue
new_balance += amount
self.statement['entries'].append({
'account': self.account,
'post_date': dt,
'transaction_date': dt,
'description': desc,
'currency': "HKD",
'amount': amount
})
desc = ""
self.statement['new_balance'][self.account]['HKD'] = new_balance
logger.debug(self.statement)
class TableZoneFcy(TableZone):
def clean_table(self):
shape = self.table.shape
logger.debug(shape)
logger.debug('table shape: {}'.format(self.table.shape))
dt = ""
ccy = ""
desc = ""
new_balance = 0.
for index, row in self.table.iterrows():
# first line with new currency is previous balance
if row[0] != ccy and row[0] != "":
if ccy != "":
# record new balance of currently parsing account before moving to next
self.statement['new_balance'][self.account][ccy] = new_balance
new_balance = 0.
ccy = row[0]
if row[5] != "":
# When this is the first movement on a currency there is no previous balance
previous_balance_tag = row[2]
previous_balance = float(row[5].replace(",", ""))
if row[6] == 'DR':
previous_balance = -previous_balance
if previous_balance_tag != "B/F BALANCE":
raise TemplateException(
"First line should contain B/F BALANCE vs [{}]".format(previous_balance_tag))
self.statement['previous_balance'][self.account][ccy] = previous_balance
new_balance = previous_balance
if ((row[1] != "") and (row[0] != "Total")): dt = self.extract_date(row[1])
desc = (desc + " " if desc != "" else "") + row[2]
credit = row[3]
debit = row[4]
amount = None
logger.debug("ccy[{}] date[{}] desc[{}] credit[{}] debit[{}]".format(ccy, dt, desc, credit, debit))
if credit is not None and credit != "":
amount = float(credit.replace(",", ""))
elif debit is not None and debit != "":
amount = -float(debit.replace(",", ""))
else:
if desc == "B/F BALANCE":
desc = ""
continue
new_balance += amount
self.statement['entries'].append({
'post_date': dt,
'transaction_date': dt,
'account': self.account,
'description': desc,
'currency': ccy,
'amount': amount
})
desc = ""
self.statement['new_balance'][self.account][ccy] = new_balance
logger.debug(self.statement)
class TableZoneSum(TableZone):
map_type = {
EnumSumAccountTypes.HKDSAVINGS: AccountTypes.HKDSAVINGS,
EnumSumAccountTypes.HKDCURRENT: AccountTypes.HKDCURRENT,
EnumSumAccountTypes.FCYSAVINGS: AccountTypes.FCYSAVINGS,
EnumSumAccountTypes.FCYCURRENT: AccountTypes.FCYCURRENT
}
def __init__(self, page_height, page_width, section, account, st_date):
super().__init__(page_height, page_width, section, account, st_date)
self.summary = {'total_balance_hkd': None, 'new_acc_balances': {}}
def extract_amount(self, stramount, dr):
logger.debug("string to convert to float:[{}]".format(stramount))
amount = stramount if isinstance(stramount, float) else float(stramount.replace(",", ""))
if dr == 'DR':
amount = - amount
return amount
def clean_table(self):
shape = self.table.shape
logger.debug(shape)
acc_bal = self.summary['new_acc_balances']
logger.debug('table shape: {}'.format(self.table.shape))
acc = ""
total = 0.
expected_total = .0
# skip first 2 lines that are header part and account narrative
for index, row in self.table[2:].iterrows():
logger.debug("process row ({}): <{}>".format(index, row))
if row[0] is not None and row[0] != "":
if row[0] == 'Total':
self.summary['total_balance_hkd'] = self.extract_amount(row[6], row[7])
continue
elif ('Card' in row[0]):
break
elif row[0] not in TableZoneSum.map_type.keys():
raise TemplateException("Summary contains an unknow Account type [{}]".format(row[0]))
else:
acc = TableZoneSum.map_type[row[0]]
ccy = row[2]
amount = self.extract_amount(row[4], row[5])
amounthkd = self.extract_amount(row[6], row[7])
logger.debug("account[{}] ccy[{}] balance[{}] balancehkd[{}]".format(acc, ccy, amount, amounthkd))
expected_total += amounthkd
if acc not in acc_bal.keys():
acc_bal[acc] = {ccy: {'ccy': amount, 'hkd':amounthkd} }
else:
acc_bal[acc][ccy] = {'ccy': amount, 'hkd':amounthkd}
logger.debug("Statement summary: {}".format(self.summary))
def check_consistency(self, summary):
# self.summary = {'total_balance_hkd': None, 'new_acc_balances': {}}
total = self.summary['total_balance_hkd']
new_bal = self.summary['new_acc_balances']
amount = 0.
for v in new_bal.values():
for vccy in v.values():
amount += vccy['hkd']
if round(amount, 2) != round(total, 2):
raise ConsistencyException("Mismatching Summary balance on {}/{}".format(round(amount, 2), round(total, 2)))
class BaseFactory:
_scrapers = []
@classmethod
def get_scraper(cls, pdfpath, pdf=None):
if not os.path.exists(pdfpath):
raise ScraperException(f'"{pdfpath}" file not found')
if not os.path.isfile(pdfpath):
raise ScraperException(f'"{pdfpath}" not a file')
pdf = pdfquery.PDFQuery(pdfpath)
pdf.load()
for s in cls._scrapers:
if s.probe_bank(pdf) and s.probe_type(pdf):
logger.debug("pdf file matches {}.{}".format(s.st_bank, s.st_type))
return s(pdfpath, pdf)
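# Example (sketch, with a hypothetical subclass name): a concrete factory registers
# its scraper classes and callers go through get_scraper():
# class StatementFactory(BaseFactory):
#     _scrapers = [HsbcAccountStatement]
# scraper = StatementFactory.get_scraper('statement.pdf')
# statement_df = scraper.process().get_df()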
class BaseStatement:
_BANK_SIGNATURE = []
_TYPE_SIGNATURE = []
st_bank = None
st_type = None
@classmethod
def probe_bank(cls, pdf):
for elem in cls._BANK_SIGNATURE:
if len(elem.querys(pdf)) == 0:
logger.debug("pdf file does not matches bank {}".format(cls.st_bank))
return False
logger.debug("pdf file matches bank {}".format(cls.st_bank))
return True
@classmethod
def probe_type(cls, pdf):
for elem in cls._TYPE_SIGNATURE:
if len(elem.querys(pdf)) == 0:
logger.debug("pdf file does not matches type {}".format(cls.st_type))
return False
logger.debug("pdf file matches type {}".format(cls.st_type))
return True
def __init__(self, pdfpath, pdf = None):
self.logger = logging.getLogger("hsbcpdf.helpers.basestatement")
self.pdfpath = pdfpath
self.pdf = pdf
if self.pdf is None:
self.pdf = pdfquery.PDFQuery(pdfpath)
self.pdf.load()
self.page_height = None
self.page_width = None
self.account_number = None
self.st_date = None
def match_template(self):
# get file pages format
p = self.pdf.pq('LTPage[page_index="0"]')[0]
self.page_height = p.layout.height
self.page_width = p.layout.width
self.logger.debug("page format: WxH = {}x{}".format(
self.page_width,
self.page_height
))
def extract_tables(self):
pass
def check_consistency(self):
pass
def merge_all(self):
self.statement = {
'type' : self.st_type,
'main_account': self.account_number,
'statement_date': self.st_date,
'previous_balance': {},
'new_balance': {},
'entries': []
}
def process(self):
self.match_template()
self.extract_tables()
self.check_consistency()
self.merge_all()
return self
def get_df(self):
df = | pd.DataFrame(self.statement['entries']) | pandas.DataFrame |
# Must run example4.py first
# Read an Excel sheet and save running config of devices using pandas
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file of .xlsx format
data = | pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0) | pandas.read_excel |
__author__ = 'rhythmicstar'
import gffutils
import pandas as pd
def possible_nmd(nmd_file):
splicing_data = pd.read_csv(nmd_file, header=None, sep=r'\s+')
index = pd.Index(splicing_data[3])
event_ids = | pd.Series(index, name='event_id') | pandas.Series |
# coding: utf-8
# ### **Loading Libraries**
# In[ ]:
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing
import os
print(os.listdir("../input"))
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import ensemble
from sklearn.metrics import accuracy_score
# ## **Importing datasets**
# In[ ]:
get_ipython().run_cell_magic('time', '', 'event_type=pd.read_csv("../input/event_type.csv",error_bad_lines=False)\ntrain = pd.read_csv("../input/train.csv")\nseverity_type = pd.read_csv("../input/severity_type.csv")\nlog_feature = pd.read_csv("../input/log_feature.csv")\ntest = pd.read_csv("../input/test.csv")\nresource_type = pd.read_csv("../input/resource_type.csv",error_bad_lines=False)\nsample_submission = pd.read_csv("../input/sample_submission.csv")')
# In[ ]:
print("test",test.shape)
print("train",train.shape)
# ### Input datasets heads
# In[ ]:
print('test',test.head())
print('train',train.head(4))
print('sample_submission',sample_submission.head())
print('event_type',event_type.shape,event_type.head(2))
print('severity_type',severity_type.shape,severity_type.head(2))
print('log_feature',log_feature.shape,log_feature.head(2))
print('resource_type',resource_type.shape,resource_type.head(2))
# ### **Visualization of Severity**
# In[ ]:
val=list(train['fault_severity'].value_counts())
for i in range(len(val)):
print(train['fault_severity'].value_counts().index[i],round(val[i]/sum(val)*100),'%')
# ## **Data conversion**
# In[ ]:
event_type['id']=pd.to_numeric(event_type['id'],errors='coerce')
#converting object datatype into numeric
# In[ ]:
event_type.dtypes
# # **Training Preprocessing**
# ### **Merging**
# In[ ]:
def merge_fn(df1,df2,col_name,how_param):
merged_df=df1.merge(df2,how=how_param,on=col_name)
return merged_df
# In[ ]:
train_merge1=merge_fn(train,event_type.drop_duplicates(subset=['id']),'id','left')
train_merge2=merge_fn(train_merge1,severity_type.drop_duplicates(subset=['id']),'id','left')
train_merge3=merge_fn(train_merge2,log_feature.drop_duplicates(subset=['id']),'id','left')
train_merge4=merge_fn(train_merge3,resource_type.drop_duplicates(subset=['id']),'id','left')
# In[ ]:
train_merge4.shape
# In[ ]:
train_merge4.head()
# #### **Calculating mean volume**
# In[ ]:
train_merge4['mean_volumn']=train_merge4.groupby(['location','event_type','severity_type','log_feature','resource_type'])['volume'].transform('mean')
# #### **Merged Training data**
# In[ ]:
train_merge4.head()
# In[ ]:
train_merge4.dtypes
# ### **Checking for missing values**
# In[ ]:
train_merge4.isnull().sum()
# ### **Finding categorical columns**
# In[ ]:
cat_col=list(set(train_merge4.columns)-set(train_merge4._get_numeric_data().columns))
# ### **Categorical conversion**
# In[ ]:
def categorical_conversion(df,cat_col):
for i in range(len(cat_col)):
df[cat_col[i]]=df[cat_col[i]].astype('category')
return df
# In[ ]:
train_merge4=categorical_conversion(train_merge4,cat_col)
# In[ ]:
train_merge4.dtypes
# ### **Label encoding**
# In[ ]:
def label_encoding_conversion(df,cat_col):
le=preprocessing.LabelEncoder()
for i in range(len(cat_col)):
df[cat_col[i]]=le.fit_transform(df[cat_col[i]])
return df
# In[ ]:
train_merge4.columns
# In[ ]:
train_merge4=label_encoding_conversion(train_merge4,cat_col)
# In[ ]:
train_merge4.columns
# ### **Dropping unique values**
# In[ ]:
train_merge4.drop(['id'],axis=1,inplace=True)
# In[ ]:
target=train_merge4[['fault_severity']]
# In[ ]:
train_merge4.drop(['fault_severity'],axis=1,inplace=True)
# In[ ]:
train_merge4.head()
# In[ ]:
train_merge4.dtypes
# In[ ]:
test.head()
# ## **TEST data preparation**
# In[ ]:
test.head()
# In[ ]:
test.shape
# ## **Test data merging**
# In[ ]:
test_merge1=merge_fn(test,event_type.drop_duplicates(subset=['id']),'id','left')
test_merge2=merge_fn(test_merge1,severity_type.drop_duplicates(subset=['id']),'id','left')
test_merge3=merge_fn(test_merge2,log_feature.drop_duplicates(subset=['id']),'id','left')
test_merge4=merge_fn(test_merge3,resource_type.drop_duplicates(subset=['id']),'id','left')
# In[ ]:
test_merge4.shape
# ### **Adding new feature - Mean volume**
# In[ ]:
test_merge4['mean_volumn']=test_merge4.groupby(['location','event_type','severity_type','log_feature','resource_type'])['volume'].transform('mean')
# In[ ]:
severity_type.head()
# In[ ]:
test_merge4.head(2)
# #### **Categorical columns**
# In[ ]:
cat_col
# ### **Categorical conversion**
# In[ ]:
test_merge4=categorical_conversion(test_merge4,cat_col)
# In[ ]:
test_merge4.dtypes
# ### **Label encoding**
# In[ ]:
test_merge4=label_encoding_conversion(test_merge4,cat_col)
# In[ ]:
test_merge4.dtypes
# ### **Removing unique columns**
# In[ ]:
test_merge4.drop(['id'],axis=1,inplace=True)
# In[ ]:
train_merge4.columns
# In[ ]:
test_merge4.columns
# ### **LogisticRegression**
# In[ ]:
train_merge4.columns
# In[ ]:
lr=LogisticRegression()
lr.fit(train_merge4,target)
lr_pred=lr.predict(test_merge4)
accuracy_score(pd.DataFrame(lr.predict(train_merge4)),target)
# ### **RandomForestClassifier**
# In[ ]:
rf=RandomForestClassifier()
rf.fit(train_merge4,target)
rf_pred=rf.predict(test_merge4)
accuracy_score(pd.DataFrame(rf.predict(train_merge4)),target)
# ### **GaussianNB**
# In[ ]:
nb=GaussianNB()
nb.fit(train_merge4,target)
nb.predict(test_merge4)
accuracy_score(pd.DataFrame(nb.predict(train_merge4)),target)
# ### **DecisionTreeClassifier**
# In[ ]:
dt=tree.DecisionTreeClassifier()
dt.fit(train_merge4,target)
dt.predict(test_merge4)
accuracy_score(pd.DataFrame(dt.predict(train_merge4)),target)
# ### **SVC**
# In[ ]:
svc_ml=svm.SVC()
svc_ml.fit(train_merge4,target)
svc_ml.predict(test_merge4)
accuracy_score(pd.DataFrame(svc_ml.predict(train_merge4)),target)
# ### **AdaBoostClassifier**
# In[ ]:
ada=AdaBoostClassifier()
ada.fit(train_merge4,target)
ada.predict(test_merge4)
accuracy_score(pd.DataFrame(ada.predict(train_merge4)),target)
# ### **KNeighborsClassifier**
# In[ ]:
knn=KNeighborsClassifier()
knn.fit(train_merge4,target)
knn.predict(test_merge4)
accuracy_score(pd.DataFrame(knn.predict(train_merge4)),target)
# ### **GradientBoostingClassifier**
# In[ ]:
gb=ensemble.GradientBoostingClassifier()
gb.fit(train_merge4,target)
gb_pre=gb.predict(test_merge4)
accuracy_score(pd.DataFrame(gb.predict(train_merge4)),target)
# ## Model comparison consolidation function
# In[ ]:
dic_data={}
list1=[]
max_clf_output=[]
tuple_l=()
def data_modeling(X,target,model):
for i in range(len(model)):
ml=model[i]
ml.fit(X,target)
pred=ml.predict(X)
acc_score=accuracy_score(pd.DataFrame(ml.predict(X)),target)
tuple_l=(ml.__class__.__name__,acc_score)
dic_data[ml.__class__.__name__]=[acc_score,ml]
list1.append(tuple_l)
print(dic_data)
for name,val in dic_data.items():
if val==max(dic_data.values()):
max_lis=[name,val]
print('Maximum classifier',name,val)
return list1,max_lis
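# Note (added for clarity): list1 holds (classifier_name, accuracy_score) tuples for every
# model tried, while max_lis is [best_classifier_name, [accuracy_score, fitted_model]].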
list1,max_lis=data_modeling(train_merge4,target,[AdaBoostClassifier(),KNeighborsClassifier(),
svm.SVC(),RandomForestClassifier(),
tree.DecisionTreeClassifier(),
GaussianNB(),
LogisticRegression(),
ensemble.GradientBoostingClassifier()])
# In[ ]:
model=max_lis[1][1]
# ## **Model score Visualization**
# In[ ]:
modelscore_df=pd.DataFrame(list1,columns=['Classifier',"Accuracy score"])
# In[ ]:
modelscore_df
# In[ ]:
modelscore_df['classifier code']=np.arange(8)
# In[ ]:
modelscore_df
# In[ ]:
modelscore_df.shape[0]
# ### **Classifier selection**
# In[ ]:
clf_sel=modelscore_df.iloc[modelscore_df['Accuracy score'].idxmax()]
clf_name=clf_sel[0]
# In[ ]:
modelscore_df.plot.bar(x='classifier code', y='Accuracy score', rot=0)
# ### **Submission file generation**
# In[ ]:
predict_test=rf.predict_proba(test_merge4)
pred_df= | pd.DataFrame(predict_test,columns=['predict_0', 'predict_1', 'predict_2']) | pandas.DataFrame |
"""DataFrameToMatrix: Convert a DataFrame to a Numpy Matrix (ndarray) Class"""
from __future__ import print_function
# Third Party
import pandas as pd
import numpy as np
# Local imports
from bat.utils import dummy_encoder
class DataFrameToMatrix(object):
"""DataFrameToMatrix: Convert a DataFrame to a Numpy Matrix (ndarray) Class
Notes:
fit_transform: Does a fit and a transform and returns the transformed matrix
transform: Based on previous fit parameters returns the transformed matrix
"""
def __init__(self):
"""Initialize the DataFrameToMatrix class"""
self.column_names = None
self.cat_columns = None
self.normalize = True
self.norm_map = {}
self.dummy_encoder = dummy_encoder.DummyEncoder()
def fit_transform(self, input_df, normalize=True):
"""Convert the dataframe to a matrix (numpy ndarray)
Args:
input_df (dataframe): The dataframe to convert
normalize (bool): Boolean flag to normalize numeric columns (default=True)
"""
# Shallow copy the dataframe (we'll be making changes to some columns)
_df = input_df.copy(deep=False)
# Set class variables that will be used both now and later for transform
self.normalize = normalize
# Convert columns that are probably categorical
self.convert_to_categorical(_df)
# First check for columns that are explicitly categorical
self.cat_columns = _df.select_dtypes(include=['category']).columns.tolist()
# Remove any columns that aren't bool/int/float/category
_df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])
# Normalize any numeric columns if normalize specified
if self.normalize:
for column in list(_df.select_dtypes(include=[np.number]).columns.values):
print('Normalizing column {:s}...'.format(column))
_df[column], _min, _max = self._normalize_series(_df[column])
self.norm_map[column] = (_min, _max)
# Now that categorical columns are setup call the dummy_encoder
return self.dummy_encoder.fit_transform(_df)
def transform(self, input_df):
"""Convert the dataframe to a matrix (numpy ndarray)
Args:
input_df (dataframe): The dataframe to convert
"""
# Shallow copy the dataframe (we'll be making changes to some columns)
_df = input_df.copy(deep=False)
# Convert all columns that are/should be categorical
for column in self.cat_columns:
# Sanity check
if column not in _df:
raise RuntimeError('Required column {:s} not found'.format(column))
# If the column isn't already a category then change it
if _df[column].dtype == 'object':
print('Changing column {:s} to category'.format(column))
_df[column] = pd.Categorical(_df[column])
# Remove any columns that aren't bool/int/float/category
_df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])
# Normalize any numeric columns if normalize specified
if self.normalize:
for column in list(_df.select_dtypes(include=[np.number]).columns.values):
print('Normalizing column {:s}...'.format(column))
smin, smax = self.norm_map[column]
_df[column] = (_df[column] - smin) / (smax - smin)
# Now that categorical columns are setup call the dummy_encoder
return self.dummy_encoder.transform(_df)
@staticmethod
def convert_to_categorical(df):
"""Run a heuristic on the columns of the dataframe to determine whether it contains categorical values
if the heuristic decides it's categorical then the type of the column is changed
Args:
df (dataframe): The dataframe to check for categorical data
"""
might_be_categorical = df.select_dtypes(include=[object]).columns.tolist()
for column in might_be_categorical:
if df[column].nunique() < 20:
# Convert the column
print('Changing column {:s} to category...'.format(column))
df[column] = | pd.Categorical(df[column]) | pandas.Categorical |
from collections import OrderedDict
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.core.construction import create_series_with_explicit_dtype
class TestFromDict:
# Note: these tests are specific to the from_dict method, not for
# passing dictionaries to DataFrame.__init__
def test_from_dict_scalars_requires_index(self):
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
def test_constructor_list_of_odicts(self):
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
result = DataFrame(data)
expected = DataFrame.from_dict(
dict(zip(range(len(data)), data)), orient="index"
)
tm.assert_frame_equal(result, expected.reindex(result.index))
def test_constructor_single_row(self):
data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
result.index
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(["x", "y"], data))
idx = Index(["a", "b", "c"])
# all named
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx, name="y"),
]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [
| Series([1.5, 3, 4], idx, dtype="O", name="x") | pandas.Series |
from pprint import pprint
import joblib
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.model_selection import train_test_split
from cian_similarity.utils import calc_metrics, get_connection, get_features, get_offers, get_pairs
class Model:
RANDOM_STATE_SKLEARN = 42
TARGET = "resolution"
def __init__(self, model_path=None):
self._conn = None
self.clf = LGBMClassifier()
if model_path is not None:
self.load(model_path)
def _init(self):
left_right = set((self.pairs.offer_id1 + self.pairs.offer_id2).values)
right_left = set((self.pairs.offer_id2 + self.pairs.offer_id1).values)
print("All pairs are unique:\t", (len(left_right.union(right_left)) / 2) == len(left_right))
print("Mean prediction:\t", self.pairs.resolution.mean())
print("Pairs shape:\t\t", self.pairs.shape)
def train(self) -> None:
X = self.pairs.apply(self.get_residual, axis=1)
X = X.join(self.pairs[[self.TARGET]])
X_train, X_test = train_test_split(X, stratify=X.resolution, random_state=self.RANDOM_STATE_SKLEARN)
# train / test(==val)
self.y_train = X_train[self.TARGET]
self.y_test = X_test[self.TARGET]
X_train = X_train.loc[:, X_train.columns != self.TARGET]
X_test = X_test.loc[:, X_test.columns != self.TARGET]
self.X_train = X_train
self.X_test = X_test
# model
self.clf.fit(self.X_train, self.y_train)
preds = self.clf.predict(X_test)
pprint(calc_metrics(preds, self.y_test))
def get_residual(self, row: pd.Series) -> pd.Series:
left = self.feats.loc[row.offer_id1]
right = self.feats.loc[row.offer_id2]
residual = abs(left - right)
residual = residual.fillna(-1)
residual["totalarea_diff"] = residual["totalarea"] / max(left["totalarea"], right["totalarea"])
return residual
def get_residual_inference(self, left: pd.Series, right: pd.Series) -> pd.Series:
residual = abs(left - right)
residual = residual.fillna(-1)
residual["totalarea_diff"] = residual["totalarea"] / max(left["totalarea"], right["totalarea"])
return residual
def predict(self, features: pd.Series) -> str:
        return self.clf.predict_proba(features)
def save(self, path="model.pkl"):
joblib.dump(self.clf, path)
def load(self, path="model.pkl"):
self.clf = joblib.load(path)
@property
def conn(self):
if self._conn is None:
self._conn = get_connection()
return self._conn
@property
def feature_imporances(self):
return | pd.Series(self.clf.feature_importances_, index=self.X_train.columns) | pandas.Series |
"""convert XML results to CSV data
"""
import os
import xml.etree.ElementTree as ET
from typing import NamedTuple, List
import re
import pandas as pd
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = THIS_DIR
class Catch2BenchResult(NamedTuple):
"""Benchmark result
Attributes:
name: name of benchmark
mean: mean of time [nano seconds]
lower_bound: lower bound of time [nano seconds]
upper_bound: upper bound of time [nano seconds]
"""
name: str
mean: float
lower_bound: float
upper_bound: float
def parse_bench_in_xml(bench_result: ET.Element) -> Catch2BenchResult:
"""Parse a benchmark result in XML
Args:
bench_result (ET.Element): element of BenchmarkResults
Returns:
Catch2BenchResult: parsed data
"""
name = bench_result.attrib['name']
mean_elem = bench_result.find('mean')
mean = mean_elem.attrib['value']
lower_bound = mean_elem.attrib['lowerBound']
upper_bound = mean_elem.attrib['upperBound']
return Catch2BenchResult(name=name, mean=mean, lower_bound=lower_bound, upper_bound=upper_bound)
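# For reference, each BenchmarkResults element is expected to look roughly like the
# following (a hypothetical sketch, not taken from real Catch2 output):
#
#   <BenchmarkResults name="bench_name">
#       <mean value="123.4" lowerBound="120.0" upperBound="126.8"/>
#   </BenchmarkResults>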
def parse_xml(filepath: str) -> pd.DataFrame:
"""Parse XML written by Catch2 library
Args:
filepath (str): filepath of XML
Returns:
pd.DataFrame: parsed data
"""
tree = ET.parse(filepath)
root = tree.getroot()
bench_results = []
for group in root.findall('Group'):
for test_case in group.findall('TestCase'):
for bench_result in test_case.findall('BenchmarkResults'):
bench_results.append(parse_bench_in_xml(bench_result))
return pd.DataFrame(
[
(result.name, result.mean, result.lower_bound, result.upper_bound)
for result in bench_results
],
columns=('name', 'mean_ns', 'lower_bound_ns', 'upper_bound_ns')
)
def parse_bench_string() -> pd.DataFrame:
"""Parse benchmark results of strings
Returns:
pd.DataFrame: results
"""
xml_results = parse_xml(DATA_DIR + '/bench_string.xml')
parsed_name = xml_results['name'].str.extract(
R'(?P<procedure>[a-z]*) string\((?P<data_size>\d*)\) with (?P<library>.*)')
bench_results = | pd.concat([parsed_name, xml_results], axis=1) | pandas.concat |
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
| tm.assert_series_equal(result[0], expected[0]) | pandas._testing.assert_series_equal |
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.ensemble import BaggingClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier
from unittest.mock import patch
from zipline.data import bundles
from tests import assert_output, project_test, generate_random_dates, assert_structure
def get_assets(ticker_count):
bundle = bundles.load('eod-quotemedia')
return bundle.asset_finder.retrieve_all(bundle.asset_finder.sids[:ticker_count])
@project_test
def test_train_valid_test_split(fn):
columns = ['test column 1', 'test column 2', 'test column 3']
dates = generate_random_dates(10)
assets = get_assets(3)
index = pd.MultiIndex.from_product([dates, assets])
values = np.arange(len(index) * len(columns)).reshape([len(columns), len(index)]).T
targets = np.arange(len(index))
fn_inputs = {
'all_x': pd.DataFrame(values, index, columns),
'all_y': pd.Series(targets, index, name='target'),
'train_size': 0.6,
'valid_size': 0.2,
'test_size': 0.2}
fn_correct_outputs = OrderedDict([
('X_train', pd.DataFrame(values[:18], index[:18], columns=columns)),
('X_valid', pd.DataFrame(values[18:24], index[18:24], columns=columns)),
('X_test', pd.DataFrame(values[24:], index[24:], columns=columns)),
('y_train', pd.Series(targets[:18], index[:18])),
('y_valid', | pd.Series(targets[18:24], index[18:24]) | pandas.Series |
"""Copyright (c) Facebook, Inc. and its affiliates."""
# pylint: disable=unused-argument,too-many-statements,unused-variable
import functools
import glob
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Optional, Union
import altair as alt
import altair_saver
import numpy as np
import pandas as pd
import typer
from altair.expr import datum
from functional import pseq, seq
from pedroai.io import (
read_json,
read_jsonlines,
requires_file,
requires_files,
safe_file,
)
from pedroai.math import to_precision
from rich.console import Console
from leaderboard.config import conf
from leaderboard.data import (
IrtParsed,
LeaderboardPredictions,
load_squad_submissions,
load_squad_v2,
)
alt.data_transformers.disable_max_rows()
PAPERS_ROOT = Path(os.environ.get("PAPERS_ROOT", "./"))
AUTO_FIG = PAPERS_ROOT / "auto_fig"
COMMIT_AUTO_FIGS = PAPERS_ROOT / "commit_auto_figs"
BASE_SIZE = 150
plot_app = typer.Typer()
console = Console()
def save_chart(chart: alt.Chart, base_path: Union[str, Path], filetypes: List[str]):
if isinstance(base_path, Path):
base_path = str(base_path)
for t in filetypes:
path = base_path + "." + t
if t in ("svg", "pdf"):
method = "node"
else:
method = None
console.log(f"Saving to: {path}")
altair_saver.save(chart, safe_file(path), method=method)
def generate_ablation_files():
ablation_files = {}
for path in glob.glob("data/linear/**/**/**/report.json"):
fields = path.split("/")
irt_family = fields[2]
irt_type = fields[3]
features = fields[4]
if irt_type in ("1PL", "2PL"):
continue
ablation_files[(irt_family, irt_type, features)] = Path(path)
return ablation_files
PLOTS = {}
def register_plot(name: str):
def decorator(func):
PLOTS[name] = func
return func
return decorator
ABLATION_FILES = generate_ablation_files()
def generate_irt_files():
irt_files = {}
for model_type, evaluations in conf["irt"]["squad"]["dev"]["pyro"].items():
for eval_type in ("full", "heldout"):
irt_files[(model_type, eval_type)] = Path(evaluations[eval_type]) / "report.json"
return irt_files
IRT_FILES = generate_irt_files()
def init_score():
return {"tie": 0, "win": 0, "loss": 0}
def run_stats_tournament(fold: str):
test_results = {}
for test in ["mcnemar", "see", "sem", "student_t", "wilcoxon"]:
stats = read_json(f"data/stats/fold={fold}/sampling=random/percent=100/{test}.json")
match_results = defaultdict(init_score)
alpha = 0.01
for r in stats["results"]:
model_a = r["model_a"]
model_b = r["model_b"]
if r["pvalue"] is not None and r["pvalue"] < alpha:
if r["score_a"] > r["score_b"]:
match_results[model_a]["win"] += 1
match_results[model_b]["loss"] += 1
else:
match_results[model_a]["loss"] += 1
match_results[model_b]["win"] += 1
else:
match_results[model_a]["tie"] += 1
match_results[model_b]["tie"] += 1
test_results[test] = match_results
return test_results
@register_plot("rank_correlation_table")
@requires_file(conf["squad"]["dev_to_test"])
def rank_correlation_table(filetypes: List[str], commit: bool = False, include_test: bool = True):
irt_model = "3PL"
dev_irt_params = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["dev"]["pyro"][irt_model]["full"]) / "parameters.json"
)
dev_predictions = LeaderboardPredictions.parse_file(
conf["squad"]["submission_predictions"]["dev"]
)
dev_id_to_subject = load_squad_submissions(dev_predictions)
console.log("N Dev IRT", len(dev_irt_params.model_stats))
stats_results = run_stats_tournament("dev")
mcnemar_results = stats_results["mcnemar"]
see_results = stats_results["see"]
student_t_results = stats_results["student_t"]
sem_results = stats_results["sem"]
if include_test:
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
test_irt_params = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"][irt_model]["full"]) / "parameters.json"
)
console.log("N Test IRT", len(test_irt_params.model_stats))
test_stats_results = run_stats_tournament("test")
test_mcnemar_results = test_stats_results["mcnemar"]
test_see_results = test_stats_results["see"]
test_student_t_results = test_stats_results["student_t"]
test_sem_results = test_stats_results["sem"]
else:
mapping = None
dev_to_test = None
test_irt_params = None
test_stats_results = None
test_mcnemar_results = None
test_see_results = None
test_student_t_results = None
test_sem_results = None
rows = []
n_test = 0
n_dev = 0
for subject_id in dev_id_to_subject.keys():
subject = dev_id_to_subject[subject_id]
entry = {
"subject_id": subject_id,
"name": subject["name"],
"dev_em": subject["dev_em"],
"test_em": subject["test_em"],
"dev_skill": dev_irt_params.model_stats[subject_id].skill,
# "dev_mcnemar": mcnemar_results[subject_id]["win"],
# "dev_see": see_results[subject_id]["win"],
# "dev_student_t": student_t_results[subject_id]["win"],
# "dev_sem": sem_results[subject_id]["win"],
}
n_dev += 1
if include_test:
if subject_id in dev_to_test:
test_subject_id = dev_to_test[subject_id]
if test_subject_id in test_irt_params.model_stats:
entry["test_skill"] = test_irt_params.model_stats[test_subject_id].skill
# entry["test_mcnemar"] = test_mcnemar_results[test_subject_id]["win"]
# entry["test_see"] = test_see_results[test_subject_id]["win"]
# entry["test_student_t"] = test_student_t_results[test_subject_id][
# "win"
# ]
# entry["test_sem"] = test_sem_results[test_subject_id]["win"]
n_test += 1
rows.append(entry)
console.log("N Dev", n_dev, "N Test", n_test)
df = pd.DataFrame(rows).dropna(axis=0)
console.log(df)
name_mapping = {
"dev_em": r"EM$_{\text{dev}}$",
"test_em": r"EM$_{\text{test}}$",
"dev_skill": r"Ability$_{\text{dev}}$",
"test_skill": r"Ability$_{\text{test}}$",
}
correlations = df.corr(method="kendall")
correlations.to_pickle("/tmp/leaderboard_correlations.pickle")
console.log(correlations)
print(
correlations.applymap(lambda n: f"${to_precision(n, 3)}$")
.rename(columns=name_mapping, index=name_mapping)
.to_latex(column_format="l" + len(name_mapping) * "r", escape=False)
)
@register_plot("sampling_stability")
def sample_stability_plot(filetypes: List[str], commit: bool = False):
input_dir = Path(conf["stability"]["sampling"])
random_df = pd.read_json(input_dir / "random_df.json")
irt_df = pd.read_json(input_dir / "irt_df.json")
info_df = pd.read_json(input_dir / "info_df.json")
method_names = {
"dev_high_disc_to_test": "High Discrimination",
"dev_high_diff_to_test": "High Difficulty",
"dev_high_disc_diff_to_test": "High Disc + Diff",
"dev_info_to_test": "High Information",
"dev_random_to_test": "Random",
}
def format_df(dataframe):
return dataframe.assign(
sampling_method=dataframe["variable"].map(lambda v: method_names[v])
)
x_scale = alt.X("trial_size", title="Development Set Sample Size", scale=alt.Scale(type="log"))
y_scale = alt.Scale(zero=False)
color_scale = alt.Color(
"sampling_method",
title="Sampling Method",
legend=alt.Legend(orient="bottom-right", fillColor="white", padding=5, strokeColor="gray"),
sort=[
"High Disc + Diff",
"High Information",
"High Discrimination",
"High Difficulty",
"Random",
],
)
random_line = (
alt.Chart(format_df(random_df))
.mark_line()
.encode(
x=x_scale,
y=alt.Y("mean(value)", scale=y_scale, title="Correlation to Test Rank"),
color=color_scale,
)
)
random_band = (
alt.Chart(format_df(random_df))
.mark_errorband(extent="ci")
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
determ_df = pd.concat([irt_df, info_df])
irt_line = (
alt.Chart(format_df(determ_df))
.mark_line()
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
font_size = 18
chart = (
(random_band + random_line + irt_line)
.configure_axis(labelFontSize=font_size, titleFontSize=font_size)
.configure_legend(
labelFontSize=font_size, titleFontSize=font_size, symbolLimit=0, labelLimit=0,
)
.configure_header(labelFontSize=font_size)
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "sampling_rank", filetypes)
else:
save_chart(chart, AUTO_FIG / "sampling_rank", filetypes)
@register_plot("cat_sampling_stability")
def cat_sample_stability_plot(filetypes: List[str], commit: bool = False):
input_dir = Path(conf["stability"]["cat_sampling"])
random_df = pd.read_json(input_dir / "random_df.json")
irt_df = pd.read_json(input_dir / "irt_df.json")
info_df = pd.read_json(input_dir / "info_df.json")
method_names = {
"dev_high_disc_to_test": "High Discrimination",
"dev_high_diff_to_test": "High Difficulty",
"dev_high_disc_diff_to_test": "High Disc + Diff",
"dev_info_to_test": "High Information",
"dev_random_to_test": "Random",
}
def format_df(dataframe):
return dataframe.assign(
sampling_method=dataframe["variable"].map(lambda v: method_names[v])
)
x_scale = alt.X("trial_size", title="Development Set Sample Size", scale=alt.Scale(type="log"))
y_scale = alt.Scale(zero=False)
color_scale = alt.Color(
"sampling_method",
title="Sampling Method",
legend=alt.Legend(orient="bottom-right", fillColor="white", padding=5, strokeColor="gray"),
sort=[
"High Information",
"High Discrimination",
"High Disc + Diff",
"High Difficulty",
"Random",
],
)
random_line = (
alt.Chart(format_df(random_df))
.mark_line()
.encode(
x=x_scale,
y=alt.Y("mean(value)", scale=y_scale, title="Correlation to Test Rank"),
color=color_scale,
)
)
random_band = (
alt.Chart(format_df(random_df))
.mark_errorband(extent="ci")
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
determ_df = pd.concat([irt_df, info_df])
irt_line = (
alt.Chart(format_df(determ_df))
.mark_line()
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
font_size = 18
chart = (
(random_band + random_line + irt_line)
.configure_axis(labelFontSize=font_size, titleFontSize=font_size)
.configure_legend(
labelFontSize=font_size, titleFontSize=font_size, symbolLimit=0, labelLimit=0,
)
.configure_header(labelFontSize=font_size)
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "cat_sampling_rank", filetypes)
else:
save_chart(chart, AUTO_FIG / "cat_sampling_rank", filetypes)
def label_experiment(label):
if label.startswith("test_"):
return "Dev Sample to Test"
else:
return "Dev Sample to Dev Sample"
def label_sig(fold: str):
if fold == "dev":
return "Dev Sample to Dev Sample"
elif fold == "test":
return "Dev Sample to Test"
else:
raise ValueError(f"Invalid fold: {fold}")
@functools.lru_cache()
def load_test_irt():
test_irt_parsed = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"]["3PL"]["full"]) / "parameters.json"
)
test_preds = LeaderboardPredictions.parse_file(conf["squad"]["submission_predictions"]["test"])
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
def get_test_irt(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_irt_parsed.model_stats:
return test_irt_parsed.model_stats[test_id].skill
else:
return None
else:
return None
def get_test_classical(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_preds.model_scores:
return test_preds.model_scores[test_id]["exact_match"]
else:
return None
else:
return None
return get_test_irt, get_test_classical
def rank_compute_bootstrap_ci(data_path: str, n_trials: int = 1000, fold: str = "dev"):
"""Given stability experiment, compute bootstrapped
confidence intervals, and check if correlations are above 95%
interval.
Args:
data_path (str): Path to dataframe stored in feather format with experiment
"""
df = pd.read_feather(data_path)
size = df["size"].iloc[0]
trial_id = df["trial_id"].iloc[0]
if fold == "test":
get_test_irt, get_test_classical = load_test_irt()
df["b_irt"] = df["subject_id"].map(get_test_irt)
df["b_classical"] = df["subject_id"].map(get_test_classical)
df = df.dropna(0)
real_corr = df.corr(method="kendall")
# Due to not implementing identifiability, IRT scores may be flipped
# Detect that and adjust as necessary
if real_corr["a_irt"].a_classical < 0:
df["a_irt"] = -df["a_irt"]
if real_corr["b_irt"].b_classical < 0:
df["b_irt"] = -df["b_irt"]
real_corr = df.corr(method="kendall")
corr_diff = real_corr["a_irt"].b_irt - real_corr["a_classical"].b_classical
a_classical_scores = df.a_classical.to_numpy()
a_irt_scores = df.a_irt.to_numpy()
indices = np.arange(0, len(a_classical_scores))
# Build up a distribution of score differences
diff_dist = []
# Simulate a bunch of times
n_subjects = len(a_classical_scores)
for _ in range(n_trials):
# Create a new similar DF, except sample with replacement one set of rankings
# Be sure to keep pairs of irt/classical scores together
sample_indices = np.random.choice(indices, n_subjects, replace=True)
sample_classical = a_classical_scores[sample_indices]
sample_irt = a_irt_scores[sample_indices]
sample_df = pd.DataFrame(
{
"subject_id": df["subject_id"],
# I'm not sure doing replacement is correct
# Also not sure if n=161 is correct, seems odd,
# but I'd be worried if I did only 20 that
# the distribution of differences might be different
"a_classical": sample_classical,
"a_irt": sample_irt,
# Keep one ranking the same
"b_classical": df["b_classical"],
"b_irt": df["b_irt"],
}
)
sample_corr = sample_df.corr(method="kendall")
# Grab correlations
irt_corr = sample_corr.loc["a_irt"].b_irt
classical_corr = sample_corr.loc["a_classical"].b_classical
# Record the difference
diff_dist.append(irt_corr - classical_corr)
diff_df = pd.DataFrame({"diff": diff_dist})
# Two tailed test, so divide by two
alpha = 1 - 0.95
lower, upper = diff_df["diff"].quantile([alpha, 1 - alpha])
# significant = bool(corr_diff < lower or upper < corr_diff)
significant = bool(upper < corr_diff)
p_value = 1 - ((diff_df["diff"] < corr_diff).sum() / n_trials)
return {
"significant": significant,
"p_value": float(p_value),
"diff": float(corr_diff),
"irt_corr": float(real_corr["a_irt"].b_irt),
"classical_corr": float(real_corr["a_classical"].b_classical),
"trial_size": int(size),
"trial_id": int(trial_id),
"lower": float(lower),
"upper": float(upper),
"alpha": alpha,
"diff_dist": diff_dist,
}
def process_trial_group(trial_size, trials):
diff_dist = []
for t in trials:
diff_dist.extend(t["diff_dist"])
diff_dist = np.array(diff_dist)
for t in trials:
p_value = 1 - (diff_dist < t["diff"]).mean()
t["total_p_value"] = p_value
yield t
def get_cached_rank_stability_sig(force: bool = False, n_trials: bool = 1000):
input_dir = Path(conf["stability"]["ranking"])
output_path = Path(conf["stability"]["ranking_sig"])
if output_path.exists() and not force:
console.log("Cached ranking stability found")
return pd.read_feather(output_path)
console.log("Cached ranking stability not found, computing...")
console.log("Computing dev results")
dev_results = (
pseq(input_dir.glob("*.feather"))
.map(lambda x: rank_compute_bootstrap_ci(x, n_trials=n_trials, fold="dev"))
.list()
)
console.log("Computing test results")
test_results = (
pseq(input_dir.glob("*.feather"))
.map(lambda x: rank_compute_bootstrap_ci(x, n_trials=n_trials, fold="test"))
.list()
)
dev_processed = (
seq(dev_results)
.group_by(lambda x: x["trial_size"])
.smap(process_trial_group)
.flatten()
.list()
)
test_processed = (
seq(test_results)
.group_by(lambda x: x["trial_size"])
.smap(process_trial_group)
.flatten()
.list()
)
dev_df = pd.DataFrame(dev_processed).drop("diff_dist", axis=1)
dev_df["fold"] = "dev"
test_df = pd.DataFrame(test_processed).drop("diff_dist", axis=1)
test_df["fold"] = "test"
df = | pd.concat([dev_df, test_df]) | pandas.concat |
""" Test cases for misc plot functions """
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.tests.plotting.common import (
TestPlotBase,
_check_plot_works,
)
import pandas.plotting as plotting
@td.skip_if_mpl
def test_import_error_message():
# GH-19810
df = DataFrame({"A": [1, 2]})
with pytest.raises(ImportError, match="matplotlib is required for plotting"):
df.plot()
def test_get_accessor_args():
func = plotting._core.PlotAccessor._get_call_args
msg = "Called plot accessor for type list, expected Series or DataFrame"
with pytest.raises(TypeError, match=msg):
func(backend_name="", data=[], args=[], kwargs={})
msg = "should not be called with positional arguments"
with pytest.raises(TypeError, match=msg):
func(backend_name="", data=Series(dtype=object), args=["line", None], kwargs={})
x, y, kind, kwargs = func(
backend_name="",
data=DataFrame(),
args=["x"],
kwargs={"y": "y", "kind": "bar", "grid": False},
)
assert x == "x"
assert y == "y"
assert kind == "bar"
assert kwargs == {"grid": False}
x, y, kind, kwargs = func(
backend_name="pandas.plotting._matplotlib",
data=Series(dtype=object),
args=[],
kwargs={},
)
assert x is None
assert y is None
assert kind == "line"
assert len(kwargs) == 24
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
def test_autocorrelation_plot(self):
from pandas.plotting import autocorrelation_plot
ser = tm.makeTimeSeries(name="ts")
# Ensure no UserWarning when making plot
with tm.assert_produces_warning(None):
_check_plot_works(autocorrelation_plot, series=ser)
_check_plot_works(autocorrelation_plot, series=ser.values)
ax = autocorrelation_plot(ser, label="Test")
self._check_legend_labels(ax, labels=["Test"])
@pytest.mark.parametrize("kwargs", [{}, {"lag": 5}])
def test_lag_plot(self, kwargs):
from pandas.plotting import lag_plot
ser = tm.makeTimeSeries(name="ts")
_check_plot_works(lag_plot, series=ser, **kwargs)
def test_bootstrap_plot(self):
from pandas.plotting import bootstrap_plot
ser = | tm.makeTimeSeries(name="ts") | pandas._testing.makeTimeSeries |
import sys
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
import pandas as pd
# ------------------------Function to combine df of all gear type-----------------------------
def main(p1, p2, p3, p4, p5, p6, version):
df1 = pd.read_csv('../data/' + p1)
df2 = pd.read_csv('../data/' + p2)
df3 = pd.read_csv('../data/' + p3)
df4 = | pd.read_csv('../data/' + p4) | pandas.read_csv |
import urllib
import urllib.parse
import urllib.request
import json
import pandas as pd
from datetime import datetime
import os
class RetrieveByAttribute(object):
'''
    This class extracts historical, user-defined weather attributes from the
WorldWeatherOnline API. The data is extracted by city.
-------------------------------Arguments-------------------------------------------
api_key: the API key obtained from 'https://www.worldweatheronline.com/developer/'. (str)
attribute_list: a list of weather attributes to collect. (list)
city: a city for which to retrieve data. (str)
start_date: The date from which to begin data extraction, in the format 'YYYY-mm-dd'. (str)
end_date: The date at which to end data extraction, in the format 'YYYY-mm-dd'. (str)
frequency: the frequency of extracted data, measured in hours. (int)
verbose: boolean determining printing during data extraction. (bool)
csv_directory: an optional file directory to store the output. (os directory)
-------------------------------Returns---------------------------------------------
dataset: a Pandas DataFrame containing the requested weather data. (Pandas DataFrame)
'''
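    # A minimal usage sketch (all argument values are hypothetical):
    #
    #   retriever = RetrieveByAttribute(api_key='YOUR_KEY',
    #                                   attribute_list=['tempC', 'humidity'],
    #                                   city='london',
    #                                   start_date='2019-01-01',
    #                                   end_date='2019-02-01',
    #                                   frequency=3)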
def __init__(self,
api_key,
attribute_list,
city,
start_date,
end_date,
frequency,
verbose = True,
csv_directory = None):
if isinstance(api_key, str) is False:
raise TypeError("The 'api_key' argument must be a string object. \n Please refer to https://www.worldweatheronline.com/developer/ to generate an API key.")
if isinstance(attribute_list, list) is False:
raise TypeError("The 'attribute_list' argument must be a list object.")
        if all(isinstance(n, str) for n in attribute_list) is False:
raise ValueError("The 'attribute_list' contents must be string objects.")
if isinstance(city, str) is False:
raise TypeError("The 'city' argument must be a string object.")
if isinstance(start_date, str) is False:
raise TypeError("The start_date argument must be a string object in the format 'YYYY-mm-dd'.")
if isinstance(end_date, str) is False:
raise TypeError("The end_date argument must be an string object in the format 'YYYY-mm-dd'.")
end_date_datetime = datetime.strptime(end_date, '%Y-%m-%d')
start_date_datetime = datetime.strptime(start_date, '%Y-%m-%d')
while start_date_datetime >= end_date_datetime:
raise ValueError("end_date argument cannot occur prior to the start_date argument.")
if isinstance(frequency, int) is False:
raise TypeError("frequency argument must be an integer object.")
while frequency not in [1, 3, 6, 12]:
raise ValueError("frequency argument (hours) must be selected from: 1,3,6,12.")
self.api_key = api_key
self.attribute_list = attribute_list
self.city = city
self.start_date = start_date
self.end_date = end_date
self.start_date_datetime = start_date_datetime
self.end_date_datetime = end_date_datetime
self.frequency = frequency
self.verbose = verbose
self.csv_directory = csv_directory
def _extract_data(self, dataset):
'''
This internal function extracts data from the output of the
_retrieve_this_city internal function below.
-------------------------------Arguments-------------------------------------------
dataset: a json file containing extracted data. (json file)
-------------------------------Returns---------------------------------------------
monthly_data: a Pandas DataFrame containing the requested weather data. (Pandas DataFrame)
'''
number_days = len(dataset)
monthly_data = pd.DataFrame()
for i in range(number_days):
d = dataset[i]
astronomy_data = pd.DataFrame(d['astronomy'])
hourly_data = pd.DataFrame(d['hourly'])
required_keys = ['date', 'maxtempC', 'mintempC', 'totalSnow_cm', 'sunHour', 'uvIndex']
subset_d = dict((k, d[k]) for k in required_keys if k in d)
weather_data = pd.DataFrame(subset_d, index=[0])
data = pd.concat([weather_data.reset_index(drop=True), astronomy_data], axis=1)
data = pd.concat([data, hourly_data], axis=1)
data = data.fillna(method='ffill')
data['time'] = data['time'].apply(lambda x: x.zfill(4))
data['time'] = data['time'].str[:2]
data['date_time'] = pd.to_datetime(data['date'] + ' ' + data['time'])
columns_required = self.attribute_list
columns_required.extend(['date_time'])
data = data[columns_required]
data = data.loc[:,~data.columns.duplicated()]
monthly_data = pd.concat([monthly_data, data])
return (monthly_data)
def _retrieve_this_city(self, city):
'''
This internal function retrieves the data corresponding to the city
specified within the input arguments, for the specified frequency between
the start_date_datetime and end_date_datetime arguments.
-------------------------------Arguments-------------------------------------------
city: the city which the user wishes to extract. (string)
-------------------------------Returns---------------------------------------------
historical_data: a Pandas DataFrame containing the requested historical data. (Pandas DataFrame)
'''
start_time = datetime.now()
list_month_begin = pd.date_range(self.start_date, self.end_date, freq = 'MS', closed = 'right')
list_month_begin = pd.concat([pd.Series(pd.to_datetime(self.start_date)), pd.Series(list_month_begin)], ignore_index = True)
list_month_end = | pd.date_range(self.start_date_datetime, self.end_date_datetime, freq='M', closed='left') | pandas.date_range |
import pandapower as pp
from pandapower.grid_equivalents.auxiliary import drop_internal_branch_elements
import pandas as pd
import numpy as np
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def _calculate_ward_and_impedance_parameters(Ybus_eq, bus_lookups, power_eq=0):
"""calculates the wards and equivalente impedance to represente the
external network"""
    # --- calculate ward parameters
b_buses_ppc = bus_lookups["bus_lookup_ppc"]["b_area_buses"]
b_buses_pd = bus_lookups["bus_lookup_pd"]["b_area_buses"]
nb_b_buses_ppc = len(b_buses_ppc)
ward_parameter = pd.DataFrame(columns=["bus_pd", "bus_ppc", "shunt", "power_eq"])
ward_parameter["bus_ppc"] = b_buses_ppc
ward_parameter["bus_pd"] = b_buses_pd
ward_parameter["shunt"] = Ybus_eq.sum(axis=1)[-nb_b_buses_ppc:]
ward_parameter["power_eq"] = 0 + 1j*0 # power_eq.power_eq.values
# --- calculate impedance paramter
params = Ybus_eq[-nb_b_buses_ppc:, -nb_b_buses_ppc:]
nl = (nb_b_buses_ppc) * (nb_b_buses_ppc - 1) // 2
impedance_parameter = pd.DataFrame(
np.arange(nl * 6).reshape((nl, 6)), columns=["from_bus", "to_bus", "rft_pu", "xft_pu",
"rtf_pu", "xtf_pu"], dtype=float)
k = 0
for i in range(nb_b_buses_ppc):
for j in range(nb_b_buses_ppc):
if j > i:
if np.abs(params[i, j]) > 1e-10:
impedance_parameter.from_bus[k] = b_buses_pd[i]
impedance_parameter.to_bus[k] = b_buses_pd[j]
impedance_parameter.rft_pu[k] = (-1 / params[i, j]).real
impedance_parameter.xft_pu[k] = (-1 / params[i, j]).imag
impedance_parameter.rtf_pu[k] = (-1 / params[j, i]).real
impedance_parameter.xtf_pu[k] = (-1 / params[j, i]).imag
k += 1
else:
impedance_parameter = impedance_parameter[:-1]
return ward_parameter, impedance_parameter
def _calculate_xward_and_impedance_parameters(net_external, Ybus_eq, bus_lookups, power_eq=0):
"""calculates the xwards and the equivalent impedance"""
xward_parameter, impedance_parameter = \
_calculate_ward_and_impedance_parameters(Ybus_eq, bus_lookups)
xward_parameter["r_ohm"] = 0
xward_parameter["x_ohm"] = -1/xward_parameter.shunt.values.imag * \
net_external.sn_mva/2
# np.square(net_external.bus.vn_kv[xward_parameter.bus_pd.values].values) / \
# net_external.sn_mva/2
xward_parameter["vm_pu"] = net_external.res_bus.vm_pu[xward_parameter.bus_pd.values].values
return xward_parameter, impedance_parameter
def create_passive_external_net_for_ward_addmittance(net, all_external_buses,
boundary_buses,
calc_volt_angles=True):
"""
    This function replaces the wards and xwards in the external network by
    internal elements, and replaces the power injections in the external area
    by shunts if necessary.
INPUT:
**net** - The pandapower format network
**all_external_buses** (list) - list of all external buses
**boundary_buses** (list) - list of boundary bus indices, by which the
        original network is divided into an internal area and an external
area
"""
    # --- replace power injections in the external net by shunts to create a passive network
v_m = net.res_bus.vm_pu[all_external_buses].values
current_injections = (net.res_bus.p_mw[all_external_buses].values -
1j * net.res_bus.q_mvar[all_external_buses].values) / net.sn_mva
shunt_params = list(current_injections / v_m**2)
    # create shunts
for i in range(len(all_external_buses)):
if abs(np.nan_to_num(shunt_params[i])) != 0:
pp.create_shunt(net, all_external_buses[i], -shunt_params[i].imag,
shunt_params[i].real)
# drops all power injections
for elm in ["sgen", "gen", "load", "storage"]:
target_idx = net[elm].index[net[elm].bus.isin(all_external_buses)]
net[elm].drop(target_idx, inplace=True)
pp.runpp(net, calculate_voltage_angles=calc_volt_angles)
def _replace_external_area_by_wards(net_external, bus_lookups, ward_parameter_no_power,
impedance_parameter, ext_buses_with_xward,
calc_volt_angles=True):
"""replaces the external networks by wards and equivalent impedance"""
# --- drop all external elements
e_buses_pd = bus_lookups["bus_lookup_pd"]["e_area_buses"]
pp.drop_buses(net_external, e_buses_pd)
drop_internal_branch_elements(net_external, bus_lookups["boundary_buses_inclusive_bswitch"])
# pp.runpp(net_external, calculate_voltage_angles=True)
# --- drop shunt elements attached to boundary buses
traget_shunt_idx = net_external.shunt.index[net_external.shunt.bus.isin(bus_lookups[
"boundary_buses_inclusive_bswitch"])]
net_external.shunt.drop(traget_shunt_idx, inplace=True)
    # --- create impedance
sn = net_external.sn_mva
for idx in impedance_parameter.index:
from_bus = impedance_parameter.from_bus[idx]
to_bus = impedance_parameter.to_bus[idx]
if abs(impedance_parameter.rft_pu[idx]) > 1e-8 or \
abs(impedance_parameter.xft_pu[idx]) > 1e-8 or \
abs(impedance_parameter.rtf_pu[idx]) > 1e-8 or \
abs(impedance_parameter.xtf_pu[idx]) > 1e-8:
pp.create_impedance(net_external, from_bus, to_bus,
impedance_parameter.rft_pu[idx],
impedance_parameter.xft_pu[idx],
sn_mva=sn,
rtf_pu=impedance_parameter.rtf_pu[idx],
xtf_pu=impedance_parameter.xtf_pu[idx],
name="eq_impedance")
else:
pp.create_switch(net_external, from_bus, to_bus, "b", name="eq_switch")
    # --- create wards
for i in ward_parameter_no_power.index:
target_bus = ward_parameter_no_power.bus_pd[i]
pp.create_ward(net_external, target_bus,
0.0, # np.nan_to_num(-ward_parameter.power_eq[i].real),
0.0, # np.nan_to_num(-ward_parameter.power_eq[i].imag),
ward_parameter_no_power.shunt[i].real * sn, # / (net_external.res_bus.vm_pu[target_bus] ** 2),
-ward_parameter_no_power.shunt[i].imag * sn, # / (net_external.res_bus.vm_pu[target_bus] ** 2),
name="network_equivalent")
eq_power = net_external.res_ext_grid.copy()
eq_power["bus"] = net_external.ext_grid.bus.values
eq_power["elm"] = "ext_grid"
slack_gen = net_external.gen.index[net_external.gen.slack==True]
if len(slack_gen) != 0:
for i in slack_gen:
new_eq_power = \
[net_external.res_gen.p_mw[i], net_external.res_gen.q_mvar[i],\
net_external.gen.bus[i], "gen"]
eq_power.loc[len(eq_power)] = new_eq_power
assert len(eq_power.bus) == len(set(eq_power.bus)) # only one slack at individual bus
pp.runpp(net_external, calculate_voltage_angles=calc_volt_angles)
eq_power.p_mw -= \
pd.concat([net_external.res_ext_grid.p_mw, net_external.res_gen.p_mw[slack_gen]])
eq_power.q_mvar -= \
pd.concat([net_external.res_ext_grid.q_mvar, net_external.res_gen.q_mvar[slack_gen]])
for bus in eq_power.bus:
net_external.ward.ps_mw[net_external.ward.bus==bus] = \
eq_power.p_mw[eq_power.bus==bus].values
net_external.ward.qs_mvar[net_external.ward.bus==bus] = \
eq_power.q_mvar[eq_power.bus==bus].values
net_external.poly_cost = net_external.poly_cost[0:0]
net_external.pwl_cost = net_external.pwl_cost[0:0]
if len(ext_buses_with_xward):
pp.drop_buses(net_external,
net_external.bus.index.tolist()[-(len(ext_buses_with_xward)):])
# net_external.ward.qs_mvar[i] = eq_power.q_mvar[
# net_external.ext_grid.bus == ward_parameter_no_power.bus_pd[i]]
def _replace_external_area_by_xwards(net_external, bus_lookups, xward_parameter_no_power,
impedance_parameter, ext_buses_with_xward,
calc_volt_angles=True):
"""replaces the external networks by xwards and equivalent impedance"""
# --- drop all external elements
e_buses_pd = bus_lookups["bus_lookup_pd"]["e_area_buses"]
pp.drop_buses(net_external, e_buses_pd)
drop_internal_branch_elements(net_external, bus_lookups["boundary_buses_inclusive_bswitch"])
# --- drop shunt elements attached to boundary buses
traget_shunt_idx = net_external.shunt.index[net_external.shunt.bus.isin(bus_lookups[
"boundary_buses_inclusive_bswitch"])]
net_external.shunt.drop(traget_shunt_idx, inplace=True)
    # --- create impedance
sn = net_external.sn_mva
for idx in impedance_parameter.index:
from_bus = impedance_parameter.from_bus[idx]
to_bus = impedance_parameter.to_bus[idx]
if abs(impedance_parameter.rft_pu[idx]) > 1e-8 or \
abs(impedance_parameter.xft_pu[idx]) > 1e-8 or \
abs(impedance_parameter.rtf_pu[idx]) > 1e-8 or \
abs(impedance_parameter.xtf_pu[idx]) > 1e-8:
pp.create_impedance(net_external, from_bus, to_bus,
impedance_parameter.rft_pu[idx],
impedance_parameter.xft_pu[idx],
sn_mva=net_external.sn_mva,
rtf_pu=impedance_parameter.rtf_pu[idx],
xtf_pu=impedance_parameter.xtf_pu[idx],
name="eq_impedance")
else:
pp.create_switch(net_external, from_bus, to_bus, "b", name="eq_switch")
    # --- create xwards
for i in xward_parameter_no_power.index:
target_bus = xward_parameter_no_power.bus_pd[i]
pp.create_xward(net_external, target_bus,
0.0, # np.nan_to_num(-xward_parameter.power_eq[i].real),
0.0, # np.nan_to_num(-xward_parameter.power_eq[i].imag),
xward_parameter_no_power.shunt[i].real * sn,
0.0,
xward_parameter_no_power.r_ohm[i],
np.nan_to_num(xward_parameter_no_power.x_ohm[i]), # neginf=1e100 is commented since this led to error
xward_parameter_no_power.vm_pu[i],
name="network_equivalent")
eq_power = net_external.res_ext_grid.copy()
eq_power["bus"] = net_external.ext_grid.bus.values
eq_power["elm"] = "ext_grid"
slack_gen = net_external.gen.index[net_external.gen.slack==True]
if len(slack_gen) != 0:
for i in slack_gen:
new_eq_power = \
[net_external.res_gen.p_mw[i], net_external.res_gen.q_mvar[i],\
net_external.gen.bus[i], "gen"]
eq_power.loc[len(eq_power)] = new_eq_power
assert len(eq_power.bus) == len(set(eq_power.bus)) # only one slack at individual bus
pp.runpp(net_external, calculate_voltage_angles=calc_volt_angles,
tolerance_mva=1e-6, max_iteration=100)
eq_power.p_mw -= \
| pd.concat([net_external.res_ext_grid.p_mw, net_external.res_gen.p_mw[slack_gen]]) | pandas.concat |
import os
import pandas as pd
import numpy as np
import copy
from pprint import pprint
def work(pres):
count = [0, 0]
for i in pres:
count[i] += 1
out = count.index(max(count))
return out
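# Example (sketch): work([1, 0, 1, 1, 0]) counts two 0s and three 1s and returns 1,
# i.e. the majority label across the per-model predictions for one sample.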
def simple_vote(model_name, date, dataset, pseudo=False):
if pseudo:
DATA_DIR = '../predict_data/{}_{}_pseudo/{}/'.format(model_name, date, dataset)
else:
DATA_DIR = '../predict_data/{}_{}/{}/'.format(model_name, date, dataset)
files = os.listdir(DATA_DIR)
files = [i for i in files]
i = 0
for fname in files:
tmp_df = pd.read_csv(DATA_DIR + fname)
tmp_df = pd.DataFrame(tmp_df, columns=['ID', 'Label'])
if i == 0:
df_merged = pd.read_csv(DATA_DIR + fname)
df_merged = pd.DataFrame(df_merged, columns=['ID', 'Label'])
if i > 0:
df_merged = df_merged.merge(tmp_df, how='left', on='ID')
print(df_merged.shape)
i += 1
tmp_label = np.array(df_merged.iloc[:, 1:])
voted_label = [work(line) for line in tmp_label]
df_summit = df_merged[['ID']]
df_summit = df_summit.copy()
df_summit['Label'] = voted_label
if pseudo:
save_path = '../predict_data/{}_{}_pseudo/vote'.format(model_name, date)
else:
save_path = '../predict_data/{}_{}/vote'.format(model_name, date)
if not os.path.exists(save_path):
os.makedirs(save_path, mode=0o777)
file_path = '{}/{}-{}-voted.csv'.format(save_path, model_name, dataset)
df_summit.to_csv(file_path, index=None)
print("写入成功!")
def aug_vote(model_name, date, dataset, pseudo=False):
if pseudo:
DATA_DIR = '../predict_data/aug_data/{}_{}_pseudo/{}/'.format(model_name, date, dataset)
else:
DATA_DIR = '../predict_data/aug_data/{}_{}/{}/'.format(model_name, date, dataset)
files = os.listdir(DATA_DIR)
files = [i for i in files]
i = 0
for fname in files:
tmp_df = pd.read_csv(DATA_DIR + fname)
tmp_df = pd.DataFrame(tmp_df, columns=['ID', 'Label'])
if i == 0:
df_merged = pd.read_csv(DATA_DIR + fname)
df_merged = pd.DataFrame(df_merged, columns=['ID', 'Label'])
if i > 0:
df_merged = df_merged.merge(tmp_df, how='left', on='ID')
print(df_merged.shape)
i += 1
df_data = pd.read_csv('../data/test_data/cn_test.csv', sep=',')
# df_data = pd.read_csv('../data/test_data/cn_test.csv', sep='\t', names=["ID", "Speaker", "Sentence"])
ID_list = [i for i in range(df_data.shape[0])]
df_data['ID'] = pd.Series(ID_list)
df_merged = df_merged.merge(df_data, how='left', on='ID')
speaker_list, sentence_list, label_list = [], [], []
humor_speaker_list, humor_sentence_list, humor_label_list = [], [], []
un_speaker_list, un_sentence_list, un_label_list = [], [], []
for index, line in df_merged.iterrows():
label_1 = int(line[1])
label_2 = int(line[2])
label_3 = int(line[3])
label_4 = int(line[4])
label_5 = int(line[5])
speaker = line[8]
sentence = line[9]
label = None
if label_1 + label_2 + label_3 + label_4 + label_5 == 5:
label = 1
humor_speaker_list.append(speaker)
humor_sentence_list.append(sentence)
humor_label_list.append(label)
elif label_1 == label_2 == label_3 == label_4 == label_5 == 0:
label = 0
un_speaker_list.append(speaker)
un_sentence_list.append(sentence)
un_label_list.append(label)
if label is not None:
speaker_list.append(speaker)
sentence_list.append(sentence)
label_list.append(label)
print(len(speaker_list), len(sentence_list), len(label_list))
print(len(humor_speaker_list), len(humor_sentence_list), len(humor_label_list))
print(len(un_speaker_list), len(un_sentence_list), len(un_label_list))
idx_list = [i for i in range(len(speaker_list))]
humor_idx_list = [i for i in range(len(humor_speaker_list))]
un_idx_list = [i for i in range(len(un_speaker_list))]
    # * tsv format
final_data = list(zip(idx_list, speaker_list, sentence_list, label_list))
final_data = pd.DataFrame(final_data, columns=['ID', 'Speaker', 'Sentence', 'Label'])
humor_final_data = list(zip(humor_idx_list, humor_speaker_list, humor_sentence_list, humor_label_list))
humor_final_data = pd.DataFrame(humor_final_data, columns=['ID', 'Speaker', 'Sentence', 'Label'])
un_final_data = list(zip(un_idx_list, un_speaker_list, un_sentence_list, un_label_list))
un_final_data = pd.DataFrame(un_final_data, columns=['ID', 'Speaker', 'Sentence', 'Label'])
    # * csv format
# final_data = list(zip(idx_list, idx_list, idx_list, speaker_list, sentence_list, label_list))
# final_data = pd.DataFrame(final_data, columns=['ID', 'Dialogue_id', 'Utterance_id', 'Speaker', 'Sentence', 'Label'])
if pseudo:
save_path = '../predict_data/aug_data/{}_{}_pseudo/vote'.format(model_name, date)
else:
save_path = '../predict_data/aug_data/{}_{}/vote'.format(model_name, date)
humor_save_path = '../predict_data/aug_data/{}_{}/humor_vote'.format(model_name, date)
un_save_path = '../predict_data/aug_data/{}_{}/un_vote'.format(model_name, date)
if not os.path.exists(save_path):
os.makedirs(save_path, mode=0o777)
os.makedirs(humor_save_path, mode=0o777)
os.makedirs(un_save_path, mode=0o777)
file_path = '{}/{}-{}-voted.tsv'.format(save_path, model_name, dataset)
humor_file_path = '{}/{}-{}-voted.tsv'.format(humor_save_path, model_name, dataset)
un_file_path = '{}/{}-{}-voted.tsv'.format(un_save_path, model_name, dataset)
    # * tsv format
final_data.to_csv(file_path, index=None, header=None, sep='\t')
humor_final_data.to_csv(humor_file_path, index=None, header=None, sep='\t')
un_final_data.to_csv(un_file_path, index=None, header=None, sep='\t')
    # * csv format
# final_data.to_csv(file_path, header=None)
# humor_final_data.to_csv(humor_file_path, header=None)
# un_final_data.to_csv(un_file_path, header=None)
print("写入成功!")
def prob_vote(model_name, date, dataset, pseudo=False):
if pseudo:
DATA_DIR = '../predict_data/{}_{}_pseudo/{}/'.format(model_name, date, dataset)
else:
DATA_DIR = '../predict_data/{}_{}/{}/'.format(model_name, date, dataset)
files = os.listdir(DATA_DIR)
files = [i for i in files]
i = 0
for fname in files:
tmp_df = | pd.read_csv(DATA_DIR + fname) | pandas.read_csv |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
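# Illustrative note on the coercion above (a sketch, not part of pandas):
# integer and float32 inputs are promoted to float64 and +/-inf becomes NaN
# before the values reach the Cython window routines.
# >>> np.asarray([1, 2, 3], dtype="int64").astype("float64").dtype
# dtype('float64')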
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it is an offset then this will be the time period of each window. Each
window will be variably sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
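# Hedged usage sketch for the argument handling above: the window types in
# ``arg_map`` need their extra parameter passed through the aggregation call
# (scipy must be installed); values below are illustrative only.
# >>> s = pd.Series(range(10))
# >>> s.rolling(5, win_type="gaussian").mean(std=1.0)  # 'std' popped by _pop_args
# >>> s.rolling(5, win_type="kaiser").mean(beta=14)    # 'beta' popped by _pop_args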
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
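# Minimal usage sketch for the weighted window methods defined above
# (illustrative values, outputs omitted):
# >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
# >>> df.rolling(2, win_type="triang").mean()  # weighted moving mean
# >>> df.rolling(2, win_type="boxcar").sum()   # evenly weighted moving sum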
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
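# Hedged sketch of the two `raw` modes handled above (illustrative only):
# with raw=True the function receives plain ndarrays, with raw=False it
# receives Series slices of the original object.
# >>> s = pd.Series([1, 2, 3, 4, 5])
# >>> s.rolling(3).apply(np.sum, raw=True)
# >>> s.rolling(3).apply(lambda x: x.iloc[-1] - x.iloc[0], raw=False)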
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
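# Illustrative sketch: skewness needs at least three observations per window
# (enforced by _require_min_periods(3) above), so shorter windows yield NaN.
# >>> s = pd.Series([1, 2, 4, 8, 16])
# >>> s.rolling(4).skew()  # first three entries are NaN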
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
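# Hedged usage sketch for the covariance defined above (numbers illustrative):
# >>> s1 = pd.Series([1.0, 2.0, 3.0, 4.0])
# >>> s2 = pd.Series([1.0, 2.0, 3.0, 2.0])
# >>> s1.rolling(3).cov(s2)             # Series vs Series
# >>> pd.DataFrame({"x": s1, "y": s2}).rolling(3).cov(pairwise=True)  # MultiIndex output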
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
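# Hedged sketch of how RollingGroupby is usually reached (via groupby), shown
# here only as an illustration of the dispatch path, not as added API docs.
# >>> df = pd.DataFrame({"g": ["a", "a", "b", "b"], "v": [1, 2, 3, 4]})
# >>> df.groupby("g")["v"].rolling(2).mean()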
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show an expanding calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
"""
)
@ | Appender(_agg_doc) | pandas.util._decorators.Appender |
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas as pd
from alphaware.base import (Factor,
FactorContainer)
from alphaware.enums import (FactorType,
OutputDataFormat,
FreqType,
FactorNormType)
from alphaware.analyzer import FactorQuantile
from pandas.util.testing import assert_frame_equal
class TestFactorQuantile(TestCase):
def test_factor_quantile(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28', '2014-03-31'], ['001', '002']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
factor_test1 = Factor(data=data1, name='alpha1')
factor_test3 = Factor(data=data1, name='alpha2')
test2_property = {'type': FactorType.FWD_RETURN,
'data_format': OutputDataFormat.MULTI_INDEX_DF,
'norm_type': FactorNormType.Null,
'freq': FreqType.EOM}
data2 = pd.DataFrame(index=index, data=[3.0, 2.0, 3.0, 7.0, 8.0, 9.0])
factor_test2 = Factor(data=data2, name='fwd_return1', property_dict=test2_property)
factor_test4 = Factor(data=data2, name='fwd_return2', property_dict=test2_property)
fc = FactorContainer('2014-01-30', '2014-02-28', [factor_test1, factor_test2, factor_test3, factor_test4])
t = FactorQuantile(quantiles=2)
calculate = t.predict(fc)
expected = pd.DataFrame(
data=[[3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0], [3.0, 7.0, 3.0, 7.0, 3.0, 7.0, 3.0, 7.0]],
index=pd.DatetimeIndex(['2014-01-30', '2014-02-28'], freq=None),
columns=['alpha1_fwd_return1_1', 'alpha1_fwd_return1_2', 'alpha2_fwd_return1_1', 'alpha2_fwd_return1_2',
'alpha1_fwd_return2_1', 'alpha1_fwd_return2_2', 'alpha2_fwd_return2_1',
'alpha2_fwd_return2_2'])
| assert_frame_equal(calculate, expected) | pandas.util.testing.assert_frame_equal |
"""
Class Features
Name: lib_data_io_nc
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Libraries
import logging
import os
import netCDF4
import time
import re
import warnings
import numpy as np
import xarray as xr
import pandas as pd
from copy import deepcopy
from hmc.algorithm.io.lib_data_io_generic import reshape_var3d, create_darray_3d, create_darray_2d
from hmc.algorithm.default.lib_default_args import logger_name, time_units, time_calendar, time_format_algorithm
from hmc.algorithm.utils.lib_utils_system import create_folder
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to write collections
def write_collections(file_name, file_data, file_time, file_attrs=None):
time_date_list = []
time_str_list = []
for time_stamp_step in file_time:
time_str_step = time_stamp_step.strftime(format=time_format_algorithm)
time_str_list.append(time_str_step)
time_date_step = time_stamp_step.to_pydatetime()
time_date_list.append(time_date_step)
# File operation(s)
file_handle = netCDF4.Dataset(file_name, 'w')
file_handle.createDimension('time', len(file_time))
# File attribute(s)
if file_attrs is not None:
for attr_key, attr_value in file_attrs.items():
file_handle.setncattr(attr_key, attr_value)
# Time information
file_time_num = file_handle.createVariable(varname='time', dimensions=('time',), datatype='float32')
file_time_num[:] = netCDF4.date2num(time_date_list, units=time_units, calendar=time_calendar)
file_time_str = file_handle.createVariable(varname='times', dimensions=('time',), datatype='str')
file_time_str[:] = np.array(time_str_list, dtype=object)
# Add file creation date
file_handle.file_date = 'Created ' + time.ctime(time.time())
for file_key, file_dict in file_data.items():
file_values = list(file_dict.values())
if isinstance(file_values[0], str):
file_data = np.array(file_values, dtype=object)
file_var = file_handle.createVariable(varname=file_key, dimensions=('time',), datatype='str')
elif isinstance(file_values[0], (int, float)):
file_data = file_values
file_var = file_handle.createVariable(varname=file_key, dimensions=('time',), datatype='f4')
else:
log_stream.error(' ===> Variable format in collections is not allowed!')
raise IOError('Bad format of array')
file_var[:] = file_data
file_handle.close()
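# Hypothetical call sketch for write_collections; the variable names and the
# dictionary layout below are assumptions for illustration, not taken from the
# HMC sources. Each inner dict must hold one value per time step.
# >>> file_time = pd.date_range('2020-04-01', periods=3, freq='H')
# >>> file_data = {'discharge': {t: float(i) for i, t in enumerate(file_time)}}
# >>> write_collections('hmc_collections.nc', file_data, file_time,
# ...                   file_attrs={'domain': 'test'})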
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read data
def read_data(file_name_list, var_name=None, var_time_start=None, var_time_end=None, var_time_freq='H',
coord_name_time='time', coord_name_geo_x='Longitude', coord_name_geo_y='Latitude',
dim_name_time='time', dim_name_geo_x='west_east', dim_name_geo_y='south_north'):
# File n
file_n = file_name_list.__len__()
if var_name is None:
log_stream.error(' ===> Variable name is undefined!')
raise IOError('Variable name is a mandatory argument!')
else:
if isinstance(var_name, list):
var_name = var_name[0]
file_check_list = []
for file_name_step in file_name_list:
if os.path.exists(file_name_step):
file_check_list.append(True)
else:
file_check_list.append(False)
file_check = any(el for el in file_check_list)
# Open datasets
if file_check:
if file_n == 1:
datetime_tmp = pd.date_range(start=var_time_start, end=var_time_end, freq=var_time_freq)
datetime_idx_select = pd.DatetimeIndex(datetime_tmp)
if os.path.exists(file_name_list[0]):
try:
dst_tmp = xr.open_dataset(file_name_list[0])
if ('time' not in list(dst_tmp.coords)) and ('time' not in list(dst_tmp.dims)):
log_stream.warning(
' ===> Time dimensions and coordinates are not included in filename \n "' +
file_name_list[0] + '". \n Time dimensions and coordinates will be assigned '
'using the first step of the reference time period "' +
str(datetime_idx_select[0]) + '".\n')
datetime_idx_tmp = pd.DatetimeIndex([datetime_idx_select[0]])
dst_tmp['time'] = datetime_idx_tmp
dst_tmp = dst_tmp.set_coords('time')
if 'time' not in list(dst_tmp.dims):
dst_tmp = dst_tmp.expand_dims('time')
# Check the time steps of datasets and expected and in case of nan's, fill with nearest values
if dst_tmp['time'].__len__() > 1:
datetime_idx_dst_tmp = pd.DatetimeIndex(dst_tmp['time'].values)
datetime_idx_dst_sel = datetime_idx_dst_tmp[(datetime_idx_dst_tmp >= datetime_idx_select[0]) &
(datetime_idx_dst_tmp <= datetime_idx_select[-1])]
if not datetime_idx_select.equals(datetime_idx_dst_sel):
if datetime_idx_select.shape[0] > datetime_idx_dst_sel.shape[0]:
log_stream.warning(
' ===> Datetime detection revealed a different number of time-steps between \n'
'datasets (' + str(datetime_idx_dst_sel.shape[0]) + ' steps) and expected (' +
str(datetime_idx_select.shape[0]) +
' steps) time-series. To avoid undefined values in the datasets time-series procedure '
'automatically filled steps with nearest values. \n')
dst_filled = dst_tmp.reindex({"time": datetime_idx_select}, method="nearest")
else:
log_stream.warning(
' ===> Datetime detection revealed a different number of time-steps between \n'
'datasets (' + str(datetime_idx_dst_sel.shape[0]) + ' steps) and expected (' +
str(datetime_idx_select.shape[0]) + ' steps) time-series. Exit \n')
raise NotImplementedError('Case not implemented yet')
# Update the tmp datasets
dst_tmp = deepcopy(dst_filled)
except BaseException as base_exp:
log_stream.warning(' ===> Exception ' + str(base_exp) + ' occurred in reading netcdf file list')
dst_tmp = xr.open_dataset(file_name_step, decode_times=False)
file_group_match = re.search('\d{4}\d{2}\d{2}\d{2}\d{2}', os.path.split(file_name_step)[1])
file_time_match = file_group_match.group()
file_timestamp_match = pd.Timestamp(file_time_match)
file_datetimeindex_match = pd.DatetimeIndex([file_timestamp_match])
log_stream.warning(
' ===> Automatic datetime detection for filename ' + os.path.split(file_name_step)[1]
+ ' return the following filetime: ' + str(file_timestamp_match))
if 'time' in list(dst_tmp.dims):
dst_tmp = dst_tmp.squeeze(dim_name_time)
dst_tmp['time'] = file_datetimeindex_match
dst_tmp = dst_tmp.set_coords('time')
dst_tmp = dst_tmp.expand_dims('time')
if var_name == 'ALL':
var_list = list(dst_tmp.data_vars)
dst = dst_tmp
elif var_name in list(dst_tmp.data_vars):
var_list = [var_name]
dst = dst_tmp[var_list]
# case for time_step equal to 1
if dst_tmp['time'].shape[0] == 1:
# force time coordinates and dimensions definition
if ('time' not in list(dst.coords)) and ('time' not in list(dst.dims)):
datetime_value_select = dst_tmp['time'].values
datetime_idx_select = pd.DatetimeIndex([datetime_value_select])
dst['time'] = datetime_idx_select
                            dst = dst.set_coords('time')
if 'time' not in list(dst.dims):
dst = dst.expand_dims('time')
elif ('time' in list(dst.coords)) and ('time' in list(dst.dims)):
pass
else:
log_stream.error(' ===> Time dimensions and coordinates mode is not allowed.')
raise NotImplementedError('Case not implemented yet')
elif dst_tmp['time'].shape[0] > 1:
pass
else:
log_stream.error(' ===> Time shape is wrongly defined')
                        raise NotImplementedError('Case not implemented yet')
else:
log_stream.warning(' ===> Variable ' + var_name + ' not available in loaded datasets!')
var_list = None
dst = None
else:
log_stream.warning(' ===> File ' + file_name_list[0] + ' not available in loaded datasets!')
elif file_n > 1:
datetime_tmp = pd.date_range(start=var_time_start, end=var_time_end, freq=var_time_freq)
datetime_idx_select = pd.DatetimeIndex(datetime_tmp)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dst_tmp = xr.open_mfdataset(file_name_list, combine='by_coords')
except BaseException as base_exp:
log_stream.warning(' ===> Exception ' + str(base_exp) + ' occurred in reading netcdf file list')
dst_tmp = None
for file_name_step, datetime_idx_step in zip(file_name_list, datetime_idx_select):
if os.path.exists(file_name_step):
dst_step = xr.open_dataset(file_name_step, decode_times=False)
if 'time' in list(dst_step.dims):
dst_step = dst_step.squeeze(dim_name_time)
dst_step['time'] = datetime_idx_step
dst_step = dst_step.set_coords('time')
dst_step = dst_step.expand_dims('time')
if dst_tmp is None:
dst_tmp = deepcopy(dst_step)
else:
dst_tmp = dst_tmp.combine_first(deepcopy(dst_step))
else:
log_stream.warning(' ===> File ' + file_name_step + ' not available in loaded datasets!')
                    # raise IOError('File not found')  # to be revisited in case of non-continuous data (e.g. updating)
if var_name == 'ALL':
var_list = list(dst_tmp.data_vars)
dst = dst_tmp
elif var_name in list(dst_tmp.data_vars):
var_list = [var_name]
dst = dst_tmp[var_list]
else:
log_stream.warning(' ===> Variable ' + var_name + ' not available in loaded datasets!')
var_list = None
dst = None
else:
log_stream.error(' ===> Filename list is not available!')
raise IOError('Filename list is null')
# Check datasets
if dst is not None:
# Get dimensions and coordinates
dst_list_coords = list(dst.coords)
# Get time, geo x and geo y
if coord_name_time in dst_list_coords:
da_time = dst[coord_name_time]
else:
# log_stream.warning(' ===> Time dimension name is not in the variables list of nc file')
if var_time_start == var_time_end:
da_time = | pd.DatetimeIndex([var_time_end]) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 12:05:22 2017
@author: rgryan
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import datetime
sh = False # Plotting the scale height info?
zc = False # Plotting the zero height concentration info?
re = True # Plotting the a.p. relative error info?
op = False # Plotting a.p. aerosol optical properties info?
path = 'E:\\Sciatran2\\AEROSOL_RETRIEVAL_v-1-5\\Campaign\\'
date = '20170307'
time = '130130'
startdate = datetime.datetime(2017, 3, 7, 6)
enddate = datetime.datetime(2017, 3, 7, 20)
tests = ['t103', 't104', 't105', 't106','t102', 't107', 't108', 't109', 't110']
dates = ['20170307','20170308', '20170309']
scale_height = [0.2, 0.4, 0.6, 0.8, 1.0,1.2]
zconc = [0.02, 0.06, 0.08, 0.1, 0.12]
relerror = [0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
opprop = [0.3, 0.8, 1.29, 1.8]
values = [0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
colours = ['red', 'orange', 'yellow', 'green', 'lightseagreen',
'skyblue', 'mediumblue', 'midnightblue',
'darkviolet', 'darkmagenta', 'magenta', 'pink']
mm_rms = []
aod_ave = []
aod_vals = pd.DataFrame()
aod_err_vals = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import csv
from sklearn.model_selection import train_test_split
import numpy as np
import random
import tensorflow as tf
import torch
#directory of tasks dataset
os.chdir("original_data")
#destination path to create tsv files, dipends on data cutting
path_0 = "mttransformer/data/0"
path_100_no_gan = "mttransformer/data/100/no_gan"
path_200_no_gan = "mttransformer/data/200/no_gan"
path_500_no_gan = "mttransformer/data/500/no_gan"
path_100_gan = "mttransformer/data/100/gan"
path_200_gan = "mttransformer/data/200/gan"
path_500_gan = "mttransformer/data/500/gan"
#if you use a model with GAN, set the flag "apply_gan" to True, otherwise to False
apply_gan=False
#data cutting: number of labeled examples kept per task
number_labeled_examples=0 #0-100-200-500
#set to True to activate balancing; it is used only in the multi-task models (MT-DNN and MT-GANBERT)
balancing=False
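#illustrative helper (an assumption, not part of the original script): shows how the flags
#above map onto the destination paths defined earlier
def select_destination_path(n_labeled=number_labeled_examples, use_gan=apply_gan):
    if n_labeled == 0:
        return path_0
    gan_paths = {100: path_100_gan, 200: path_200_gan, 500: path_500_gan}
    no_gan_paths = {100: path_100_no_gan, 200: path_200_no_gan, 500: path_500_no_gan}
    return gan_paths[n_labeled] if use_gan else no_gan_paths[n_labeled]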
#path train and test dataset of the task
tsv_haspeede_train = 'haspeede_TW-train.tsv'
tsv_haspeede_test = 'haspeede_TW-reference.tsv'
tsv_AMI2018_train = 'AMI2018_it_training.tsv'
tsv_AMI2018_test = 'AMI2018_it_testing.tsv'
tsv_DANKMEMES2020_train = 'dankmemes_task2_train.csv'
tsv_DANKMEMES2020_test = 'hate_test.csv'
tsv_SENTIPOLC2016_train = 'training_set_sentipolc16.csv'
tsv_SENTIPOLC2016_test = 'test_set_sentipolc16_gold2000.csv'
#Upload the dataset of all task as dataframes
#haspeede_TW
df_train = pd.read_csv(tsv_haspeede_train, delimiter='\t', names=('id','sentence','label'))
df_train = df_train[['id']+['label']+['sentence']]
df_test = pd.read_csv(tsv_haspeede_test, delimiter='\t', names=('id','sentence','label'))
df_test = df_test[['id']+['label']+['sentence']]
#AMI2018A
df_train2 = pd.read_csv(tsv_AMI2018_train, delimiter='\t')
df_train2 = df_train2[['id']+['misogynous']+['text']]
df_test2 = pd.read_csv(tsv_AMI2018_test, delimiter='\t')
df_test2 = df_test2[['id']+['misogynous']+['text']]
#AMI2018B
df_train3 = pd.read_csv(tsv_AMI2018_train, delimiter='\t')
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
for ind in df_train3.index:
if df_train3.misogynous[ind]==1:
if df_train3.misogyny_category[ind] == 'stereotype':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 0, 'text' : df_train3['text'][ind] }, ignore_index=True)
#elif df_train3.misogyny_category[ind] == 'dominance':
#df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 1, 'text' : df_train3['text'][ind] }, ignore_index=True)
#elif df_train3.misogyny_category[ind] == 'derailing':
#df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 2, 'text' : df_train3['text'][ind] }, ignore_index=True)
elif df_train3.misogyny_category[ind] == 'sexual_harassment':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 1, 'text' : df_train3['text'][ind] }, ignore_index=True)
elif df_train3.misogyny_category[ind] == 'discredit':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 2, 'text' : df_train3['text'][ind] }, ignore_index=True)
df_train3 = df
df_test3 = pd.read_csv(tsv_AMI2018_test, delimiter='\t')
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
for ind in df_test3.index:
if df_test3.misogynous[ind]==1:
if df_test3.misogyny_category[ind] == 'stereotype':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 0, 'text' : df_test3['text'][ind] }, ignore_index=True)
#elif df_test3.misogyny_category[ind] == 'dominance':
#df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 1, 'text' : df_test3['text'][ind] }, ignore_index=True)
#elif df_test3.misogyny_category[ind] == 'derailing':
#df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 2, 'text' : df_test3['text'][ind] }, ignore_index=True)
elif df_test3.misogyny_category[ind] == 'sexual_harassment':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 1, 'text' : df_test3['text'][ind] }, ignore_index=True)
elif df_test3.misogyny_category[ind] == 'discredit':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 2, 'text' : df_test3['text'][ind] }, ignore_index=True)
df_test3 = df
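#illustrative alternative (not used below): the same AMI2018-B category mapping can be written
#with a dictionary and vectorised pandas calls instead of the row-wise append loops above,
#e.g. map_misogyny_categories(pd.read_csv(tsv_AMI2018_train, delimiter='\t'))
def map_misogyny_categories(frame):
    category_map = {'stereotype': 0, 'sexual_harassment': 1, 'discredit': 2}
    subset = frame[(frame['misogynous'] == 1) & (frame['misogyny_category'].isin(category_map))].copy()
    subset['misogyny_category'] = subset['misogyny_category'].map(category_map)
    return subset[['id', 'misogyny_category', 'text']]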
#DANKMEMES2020
df_train4 = pd.read_csv(tsv_DANKMEMES2020_train, delimiter=',')
df_train4 = df_train4[['File']+['Hate Speech']+['Text']]
df_test4 = pd.read_csv(tsv_DANKMEMES2020_test, delimiter=',')
df_test4 = df_test4[['File']+['Hate Speech']+['Text']]
#SENTIPOLC20161
df_train5 = pd.read_csv(tsv_SENTIPOLC2016_train, delimiter=',')
df_train5 = df_train5[['idtwitter']+['subj']+['text']]
df_test5 = pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',')
df_test5 = df_test5[['idtwitter']+['subj']+['text']]
for ind in df_train5.index:
if "\t" in df_train5.text[ind]:
df_train5 = df_train5.replace(to_replace='\t', value='', regex=True)
#SENTIPOLC20162
df_train6 = pd.read_csv(tsv_SENTIPOLC2016_train, delimiter=',')
df = pd.DataFrame(columns=['idtwitter', 'polarity', 'text'])
for ind in df_train6.index:
if df_train6['subj'][ind] == 1:
if df_train6['opos'][ind] == 1 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 0, 'text' : df_train6['text'][ind] }, ignore_index=True)
elif df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 1:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 1, 'text' : df_train6['text'][ind] }, ignore_index=True)
elif df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 2, 'text' : df_train6['text'][ind] }, ignore_index=True)
else:
if df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 2, 'text' : df_train6['text'][ind] }, ignore_index=True)
df_train6 = df
for ind in df_train6.index:
if "\t" in df_train6.text[ind]:
df_train6 = df_train6.replace(to_replace='\t', value='', regex=True)
df_test6 = pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',')
df = pd.DataFrame(columns=['idtwitter', 'polarity', 'text'])
for ind in df_test6.index:
if df_test6['subj'][ind] == 1:
if df_test6['opos'][ind] == 1 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 0, 'text' : df_test6['text'][ind] }, ignore_index=True)
elif df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 1:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 1, 'text' : df_test6['text'][ind] }, ignore_index=True)
elif df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 2, 'text' : df_test6['text'][ind] }, ignore_index=True)
else:
if df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 2, 'text' : df_test6['text'][ind] }, ignore_index=True)
df_test6 = df
#split train dev, in all tasks
train_dataset, dev_dataset = train_test_split(df_train, test_size=0.2, shuffle = True)
train_dataset2, dev_dataset2 = train_test_split(df_train2, test_size=0.2, shuffle = True)
train_dataset3, dev_dataset3 = train_test_split(df_train3, test_size=0.2, shuffle = True)
train_dataset4, dev_dataset4 = train_test_split(df_train4, test_size=0.2, shuffle = True)
train_dataset5, dev_dataset5 = train_test_split(df_train5, test_size=0.2, shuffle = True)
train_dataset6, dev_dataset6 = train_test_split(df_train6, test_size=0.2, shuffle = True)
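#note (illustrative): the splits above are not seeded, so every run yields a different
#train/dev partition; passing random_state, e.g.
#train_dataset, dev_dataset = train_test_split(df_train, test_size=0.2, shuffle=True, random_state=42)
#would make the experiments reproducible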
#reduction of datasets in case of data cutting 100, 200, 500
if number_labeled_examples!=0:
if number_labeled_examples==100:
labeled = train_dataset.sample(n=100)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=100)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=100)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=100)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=100)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=100)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
elif number_labeled_examples==200:
labeled = train_dataset.sample(n=200)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=200)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=200)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=200)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=200)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=200)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
elif number_labeled_examples==500:
labeled = train_dataset.sample(n=500)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=500)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=500)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=500)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=500)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=500)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
#model with or without gan
if apply_gan == True:
print("MT-GANBERT")
#dataset unlabeled with label -1
unlabeled['label'] = unlabeled['label'].replace(0,-1)
unlabeled['label'] = unlabeled['label'].replace(1,-1)
unlabeled2['misogynous'] = unlabeled2['misogynous'].replace(0,-1)
unlabeled2['misogynous'] = unlabeled2['misogynous'].replace(1,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(0,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(1,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(2,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(3,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(4,-1)
unlabeled4['Hate Speech'] = unlabeled4['Hate Speech'].replace(0,-1)
unlabeled4['Hate Speech'] = unlabeled4['Hate Speech'].replace(1,-1)
unlabeled5['subj'] = unlabeled5['subj'].replace(0,-1)
unlabeled5['subj'] = unlabeled5['subj'].replace(1,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(0,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(1,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(2,-1)
train = pd.concat([labeled, unlabeled])
train2 = pd.concat([labeled2, unlabeled2])
train3 = pd.concat([labeled3, unlabeled3])
train4 = pd.concat([labeled4, unlabeled4])
train5 = pd.concat([labeled5, unlabeled5])
train6 = pd.concat([labeled6, unlabeled6])
dev = dev_dataset
dev2 = dev_dataset2
dev3 = dev_dataset3
dev4 = dev_dataset4
dev5 = dev_dataset5
dev6 = dev_dataset6
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train),len(labeled), len(unlabeled)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train2),len(labeled2), len(unlabeled2)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train3),len(labeled3), len(unlabeled3)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train4),len(labeled4), len(unlabeled4)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train5),len(labeled5), len(unlabeled5)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train6),len(labeled6), len(unlabeled6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
else:
print("MT-DNN, with reduction dataset")
train = labeled
train2 = labeled2
train3 = labeled3
train4 = labeled4
train5 = labeled5
train6 = labeled6
dev = dev_dataset
dev2 = dev_dataset2
dev3 = dev_dataset3
dev4 = dev_dataset4
dev5 = dev_dataset5
dev6 = dev_dataset6
print("Size of Train dataset is {} ".format(len(labeled)))
print("Size of Train dataset is {} ".format(len(labeled2)))
print("Size of Train dataset is {} ".format(len(labeled3)))
print("Size of Train dataset is {} ".format(len(labeled4)))
print("Size of Train dataset is {} ".format(len(labeled5)))
print("Size of Train dataset is {} ".format(len(labeled6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
else:
print("MT-DNN")
train = train_dataset
train2 = train_dataset2
train3 = train_dataset3
train4 = train_dataset4
train5 = train_dataset5
train6 = train_dataset6
dev = dev_dataset
dev2 = dev_dataset2
dev3=dev_dataset3
dev4=dev_dataset4
dev5=dev_dataset5
dev6=dev_dataset6
print("Size of Train dataset is {} ".format(len(train)))
print("Size of Train dataset is {} ".format(len(train2)))
print("Size of Train dataset is {} ".format(len(train3)))
print("Size of Train dataset is {} ".format(len(train4)))
print("Size of Train dataset is {} ".format(len(train5)))
print("Size of Train dataset is {} ".format(len(train6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
#Balancing for:
#- MT-DNN, trained on the total dataset of each task
#- MT-GAN, trained on the chosen data cutting of each task
if balancing==True:
if apply_gan== True:
print("MT-GAN")
max_train_un = max(len(unlabeled), len(unlabeled2), len(unlabeled3), len(unlabeled4), len(unlabeled5), len(unlabeled6))
print(max_train_un)
else:
print("MT-DNN")
unlabeled=train
unlabeled2=train2
unlabeled3=train3
unlabeled4=train4
unlabeled5=train5
unlabeled6=train6
max_train_un = max(len(unlabeled), len(unlabeled2), len(unlabeled3), len(unlabeled4), len(unlabeled5), len(unlabeled6))
print(max_train_un)
    #oversample (cyclically repeat) each task's training data until it matches the size of the largest one
df = pd.DataFrame(columns=['id', 'label', 'sentence'])
count=0
if len(unlabeled)<max_train_un:
for i in range(max_train_un):
if i < len(unlabeled):
df = df.append({'id' : unlabeled.iloc[i, 0], 'label' : unlabeled.iloc[i, 1], 'sentence' : unlabeled.iloc[i, 2] }, ignore_index=True)
else:
if count < len(unlabeled):
df = df.append({'id' : unlabeled.iloc[count, 0], 'label' : unlabeled.iloc[count, 1], 'sentence' : unlabeled.iloc[count, 2] }, ignore_index=True)
count = count+1
else:
count = 0
df = df.append({'id' : unlabeled.iloc[count, 0], 'label' : unlabeled.iloc[count, 1], 'sentence' : unlabeled.iloc[count, 2] }, ignore_index=True)
count = count+1
unlabeled = df
if apply_gan== True:
train = pd.concat([labeled, unlabeled])
else:
train=unlabeled
df = pd.DataFrame(columns=['id', 'misogynous', 'text'])
count=0
if len(unlabeled2)<max_train_un:
for i in range(max_train_un):
if i < len(unlabeled2):
df = df.append({'id' : unlabeled2.iloc[i, 0], 'misogynous' : unlabeled2.iloc[i, 1], 'text' : unlabeled2.iloc[i, 2] }, ignore_index=True)
else:
if count < len(unlabeled2):
df = df.append({'id' : unlabeled2.iloc[count, 0], 'misogynous' : unlabeled2.iloc[count, 1], 'text' : unlabeled2.iloc[count, 2] }, ignore_index=True)
count = count+1
else:
count = 0
df = df.append({'id' : unlabeled2.iloc[count, 0], 'misogynous' : unlabeled2.iloc[count, 1], 'text' : unlabeled2.iloc[count, 2] }, ignore_index=True)
count = count+1
unlabeled2 = df
if apply_gan==True:
train2 = pd.concat([labeled2, unlabeled2])
else:
train2=unlabeled2
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
count=0
if len(unlabeled3)<max_train_un:
for i in range(max_train_un):
if i < len(unlabeled3):
df = df.append({'id' : unlabeled3.iloc[i, 0], 'misogyny_category' : unlabeled3.iloc[i, 1], 'text' : unlabeled3.iloc[i, 2] }, ignore_index=True)
else:
if count < len(unlabeled3):
df = df.append({'id' : unlabeled3.iloc[count, 0], 'misogyny_category' : unlabeled3.iloc[count, 1], 'text' : unlabeled3.iloc[count, 2] }, ignore_index=True)
count = count+1
else:
count = 0
df = df.append({'id' : unlabeled3.iloc[count, 0], 'misogyny_category' : unlabeled3.iloc[count, 1], 'text' : unlabeled3.iloc[count, 2] }, ignore_index=True)
count = count+1
unlabeled3 = df
if apply_gan==True:
train3 = | pd.concat([labeled3, unlabeled3]) | pandas.concat |
#
# Copyright (c) 2015 - 2022, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
GEOPM IO - Helper module for parsing/processing report and trace files.
"""
from __future__ import absolute_import
from __future__ import division
from builtins import str
from collections import OrderedDict
import os
import json
import re
import pandas
import numpy
import glob
import sys
import subprocess
import psutil
import copy
import yaml
import io
import hashlib
from distutils.spawn import find_executable
from natsort import natsorted
from . import __version__
from . import update_report
try:
_, os.environ['COLUMNS'] = subprocess.check_output(['stty', 'size']).decode().split()
except subprocess.CalledProcessError:
os.environ['COLUMNS'] = "200"
pandas.set_option('display.width', int(os.environ['COLUMNS']))
pandas.set_option('display.max_colwidth', 80)
pandas.set_option('max_columns', 100)
class AppOutput(object):
"""The container class for all trace related data.
This class holds the relevant objects for parsing and indexing all
data that is output from GEOPM. This object can be created with a
a trace glob string that will be used
to search dir_name for the relevant files. If files are found
their data will be parsed into objects for easy data access.
    Additionally a Pandas DataFrame is constructed containing all of the
trace data. These DataFrames are indexed based on the version of
GEOPM found in the files, the profile name, agent name, and the number
of times that particular configuration has been seen by the parser
(i.e. experiment iteration).
Attributes:
trace_glob: The string pattern to use to search for trace files.
dir_name: The directory path to use when searching for files.
verbose: A bool to control whether verbose output is printed to stdout.
"""
def __init__(self, traces=None, dir_name='.', verbose=False, do_cache=True):
self._traces = {}
self._traces_df = | pandas.DataFrame() | pandas.DataFrame |
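        # Usage sketch (illustrative; the class continues beyond this excerpt, and the
        # accessor name below is an assumption rather than a documented method):
        #     output = AppOutput(traces='*.trace-*', dir_name='.', verbose=True)
        #     combined_trace_df = output.get_trace_df()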
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
from usal_echo.d00_utils.log_utils import setup_logging
from usal_echo.d00_utils.db_utils import (
dbReadWriteClean,
dbReadWriteViews,
dbReadWriteMeasurement,
)
logger = setup_logging(__name__, __name__)
def get_recommendation(row):
return (
"normal"
if row["measurement_value"] >= 60
else "abnormal"
if row["measurement_value"] < 40
else "greyzone"
)
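# Example (illustrative): get_recommendation expects a row-like mapping holding an
# ejection fraction in 'measurement_value'.
#     get_recommendation({"measurement_value": 62.0})  # -> "normal"
#     get_recommendation({"measurement_value": 35.0})  # -> "abnormal"
#     get_recommendation({"measurement_value": 50.0})  # -> "greyzone"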
def retrieve_meas():
"""Write ground truth volumes, ejection fractions, and recommendations."""
io_clean = dbReadWriteClean()
io_views = dbReadWriteViews()
io_measurement = dbReadWriteMeasurement()
# For measurement names and units on the study level.
measurement_abstract_rpt_df = io_clean.get_table("measurement_abstract_rpt")
measurement_abstract_rpt_df = measurement_abstract_rpt_df.drop(["value"], axis=1)
# For measurement values on the instance/indexinmglist/meassequence level.
a_measgraphref_df = io_clean.get_table("a_measgraphref")
a_measgraphref_df = a_measgraphref_df.drop(
["srinstanceidk", "imagesopinstanceuid", "measurementuid"], axis=1
)
# For instances with A2C/A4C views.
instances_w_labels_df = io_views.get_table("instances_w_labels")
instances_w_a2c_a4c_labels_df = instances_w_labels_df[
(instances_w_labels_df["view"] != "plax")
]
instances_w_a2c_a4c_labels_df = instances_w_a2c_a4c_labels_df[
["studyidk", "instanceidk", "filename"]
]
# All measurement values for A2C/A4C instances with measurement names and units.
merge_df = measurement_abstract_rpt_df.merge(
a_measgraphref_df, on=["studyidk", "measabstractnumber"]
)
merge_df = merge_df.merge(
instances_w_a2c_a4c_labels_df, on=["studyidk", "instanceidk"]
)
# To calculate ejection fractions, need gold-standard end systole/diastole volumes (MDD-ps4, non-negative).
filter_df = merge_df[merge_df["name"].isin(["VTD(MDD-ps4)", "VTS(MDD-ps4)"])]
filter_df = filter_df[filter_df["value"] > 0]
# Rename and reorder columns for measurement schema.
rename_df = filter_df[
[
"studyidk",
"instanceidk",
"filename",
"name",
"unitname",
"value",
"indexinmglist",
]
]
rename_df = rename_df.rename(
columns={
"studyidk": "study_id",
"instanceidk": "instance_id",
"filename": "file_name",
"name": "measurement_name",
"unitname": "measurement_unit",
"value": "measurement_value",
}
)
# Get median measurement values over meassequence/indexinmglist.
agg_dict = {
"measurement_unit": pd.Series.unique,
"measurement_value": pd.Series.median,
}
volume_df = (
rename_df.groupby(
[
"study_id",
"instance_id",
"file_name",
"measurement_name",
"indexinmglist",
]
)
.agg(agg_dict)
.reset_index()
)
volume_df = (
volume_df.groupby(["study_id", "instance_id", "file_name", "measurement_name"])
.agg(agg_dict)
.reset_index()
)
# Get diastole and systole volumes that are in the same instances.
diastole_df = volume_df[volume_df["measurement_name"].str.contains("VTD")]
systole_df = volume_df[volume_df["measurement_name"].str.contains("VTS")]
diastole_df = diastole_df.drop(["measurement_name", "measurement_unit"], axis=1)
systole_df = systole_df.drop(["measurement_name", "measurement_unit"], axis=1)
diastole_df = diastole_df[
diastole_df["instance_id"].isin(systole_df["instance_id"].unique())
]
systole_df = systole_df[
systole_df["instance_id"].isin(diastole_df["instance_id"].unique())
]
# Calculate ejection fractions where diastole volume is no less than systole volume.
ef_df = diastole_df.merge(
systole_df, on=["study_id", "instance_id"], suffixes=["_diastole", "_systole"]
)
ef_df = ef_df[
ef_df["measurement_value_diastole"] >= ef_df["measurement_value_systole"]
]
ef_df["file_name"] = ef_df["file_name_diastole"]
ef_df["measurement_name"] = "FE(MDD-ps4)"
ef_df["measurement_unit"] = "%"
ef_df["measurement_value"] = (
(ef_df["measurement_value_diastole"] - ef_df["measurement_value_systole"])
/ ef_df["measurement_value_diastole"]
* 100
)
ef_df = ef_df.drop(
[
"file_name_diastole",
"measurement_value_diastole",
"file_name_systole",
"measurement_value_systole",
],
axis=1,
)
# Get recommendations based on ejection fraction values.
recommendation_df = ef_df.copy()
recommendation_df["measurement_name"] = "recommendation"
recommendation_df["measurement_unit"] = ""
recommendation_df["measurement_value"] = recommendation_df.apply(
get_recommendation, axis=1
)
# Write volumes, ejection fractions, and recommendations.
ground_truth_df = volume_df.append(ef_df).append(recommendation_df)
ground_truth_df["file_name"] = (
"a_"
+ ground_truth_df["study_id"].astype(str)
+ "_"
+ ground_truth_df["file_name"]
)
# Add serial id.
old_ground_truth_df = io_measurement.get_table("ground_truths")
start = len(old_ground_truth_df)
ground_truth_id = | pd.Series(start + ground_truth_df.index) | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_handle_mixed_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
malformed_C = np.random.binomial(2, p=0.8) # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
assert_frame_equal(observedw, observed)
assert_frame_equal(censoredw, censored)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
# from R
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected # skip the first event as that is the birth time, 0.
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
# Zeros is exactly random
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
# Itself is 1
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
# Random is close to 0.5
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = | pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}]) | pandas.DataFrame.from_records |
from __future__ import print_function
import collections
import os
import re
import sys
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', 'utils'))
sys.path.append(lib_path)
from data_utils import get_file
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
imputer = Imputer(strategy='mean', axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = | pd.DataFrame(mat, columns=df.columns) | pandas.DataFrame |
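# Usage sketch (illustrative; the function body continues beyond this excerpt and is
# expected to return the imputed and scaled dataframe):
#     raw = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [0.5, 0.25, np.nan]})
#     scaled = impute_and_scale(raw, scaling='minmax')  # column means fill the NaNs, then values are scaled to [0, 1]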
import pandas as pd
#import geopandas as gpd
import numpy as np
import os
#from sqlalchemy import create_engine
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
import math
#from shapely import wkt
from datetime import datetime, timedelta, date
import time
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import requests
from pyspark.sql import SparkSession
from pyspark.sql.functions import substring, length, col, expr
from pyspark.sql.types import *
import matplotlib.pyplot as plt
#import contextily as cx --> gives error?
spark = SparkSession \
.builder \
.getOrCreate()
def get_minio_herkomst_2020():
bucket = "gvb-gvb"
data_key = "*/*/*/Datalab_Reis_Herkomst_Uur_*.csv"
data_location = bucket + "/" + data_key
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep = ";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2020 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/*/*/*/Datalab_Reis_Bestemming_Uur_*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def get_minio_herkomst_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Herkomst_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep =";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Bestemming_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
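# Illustrative sketch (an assumption about downstream use, not shown in this excerpt): the
# 2020 and 2021 extracts select the same columns, so they can be stacked directly, e.g.
#     gvb_herkomst = pd.concat([get_minio_herkomst_2020(), get_minio_herkomst_2021()], ignore_index=True)
#     gvb_bestemming = pd.concat([get_minio_bestemming_2020(), get_minio_bestemming_2021()], ignore_index=True)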
def read_csv_dir(dir):
read_csv_beta = pd.read_csv(dir,sep=';')
return read_csv_beta
def get_knmi_obs():
knmi_obs_schema = StructType([StructField("DD", StringType(), True),
StructField("DR", StringType(), True),
StructField("FF", StringType(), True),
StructField("FH", StringType(), True),
StructField("FX", StringType(), True),
StructField("IX", StringType(), True),
StructField("M", IntegerType(), True),
StructField("N", IntegerType(), True),
StructField("O", IntegerType(), True),
StructField("P", IntegerType(), True),
StructField("Q", IntegerType(), True),
StructField("R", IntegerType(), True),
StructField("RH", IntegerType(), True),
StructField("S", IntegerType(), True),
StructField("SQ", IntegerType(), True),
StructField("T", IntegerType(), True),
StructField("T10N", IntegerType(), True),
StructField("TD", IntegerType(), True),
StructField("U", IntegerType(), True),
StructField("VV", IntegerType(), True),
StructField("WW", IntegerType(), True),
StructField("Y", IntegerType(), True),
StructField("date", StringType(), True),
StructField("hour", IntegerType(), True),
StructField("station_code", IntegerType(), True)
])
knmi_obs = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi-observations/2021/*/*/*", schema=knmi_obs_schema)
return knmi_obs
def get_knmi_preds():
knmi_pred_schema = StructType([StructField("cape", IntegerType(), True),
StructField("cond", StringType(), True),
StructField("gr", StringType(), True),
StructField("gr_w", StringType(), True),
StructField("gust", StringType(), True),
StructField("gustb", StringType(), True),
StructField("gustkmh", StringType(), True),
StructField("gustkt", StringType(), True),
StructField("hw", StringType(), True),
StructField("ico", StringType(), True),
StructField("icoon", StringType(), True),
StructField("loc", StringType(), True),
StructField("luchtd", StringType(), True),
StructField("luchtdinhg", StringType(), True),
StructField("luchtdmmhg", StringType(), True),
StructField("lw", StringType(), True),
StructField("mw", StringType(), True),
StructField("neersl", StringType(), True),
StructField("offset", StringType(), True),
StructField("rv", StringType(), True),
StructField("samenv", IntegerType(), True),
StructField("temp", StringType(), True),
StructField("tijd", StringType(), True),
StructField("tijd_nl", StringType(), True),
StructField("tw", StringType(), True),
StructField("vis", StringType(), True),
StructField("windb", StringType(), True),
StructField("windkmh", StringType(), True),
StructField("windknp", StringType(), True),
StructField("windr", StringType(), True),
StructField("windrltr", StringType(), True),
StructField("winds", StringType(), True)
])
knmi_pred_cols = ('cape', 'cond', 'gr', 'gr_w', 'gust', 'gustb', 'gustkmh', 'gustkt',
'hw', 'ico', 'icoon', 'loc', 'luchtd', 'luchtdinhg', 'luchtdmmhg', 'lw',
'mw', 'neersl', 'offset', 'rv', 'samenv', 'temp', 'tijd', 'tijd_nl',
'tw', 'vis', 'windb', 'windkmh', 'windknp', 'windr', 'windrltr',
'winds')
knmi_pred = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi/2021/*/*/*.json.gz", schema=knmi_pred_schema).select(*knmi_pred_cols)
return knmi_pred
def get_prediction_df():
"""
Return the prediction dataframe (date- and hours only)
"""
this_year = date.today().isocalendar()[0]
this_week = date.today().isocalendar()[1]
    firstdayofweek = datetime.strptime(f'{this_year}-W{int(this_week)}-1', "%Y-W%W-%w").date()
    prediction_date_range = pd.date_range(firstdayofweek, periods=8, freq='D')
prediction_date_range_hour = pd.date_range(prediction_date_range.min(), prediction_date_range.max(), freq='h').delete(-1)
return prediction_date_range_hour
def get_vacations():
"""
Retrieves vacations in the Netherlands from the Government of the Netherlands (Rijksoverheid) and returns
the list of dates that are vacation dates
"""
vacations_url = 'https://opendata.rijksoverheid.nl/v1/sources/rijksoverheid/infotypes/schoolholidays?output=json'
vacations_raw = requests.get(url = vacations_url).json()
df_vacations = pd.DataFrame(columns={'vacation', 'region', 'startdate', 'enddate'})
for x in range(0, len(vacations_raw)): # Iterate through all vacation years
for y in range(0, len(vacations_raw[0]['content'][0]['vacations'])): # number of vacations in a year
dates = pd.DataFrame(vacations_raw[x]['content'][0]['vacations'][y]['regions'])
dates['vacation'] = vacations_raw[x]['content'][0]['vacations'][y]['type'].strip() # vacation name
dates['school_year'] = vacations_raw[x]['content'][0]['schoolyear'].strip() # school year
df_vacations = df_vacations.append(dates)
filtered = df_vacations[(df_vacations['region']=='noord') | (df_vacations['region']=='heel Nederland')]
vacations_date_only = pd.DataFrame(columns={'date'})
for x in range(0, len(filtered)):
df_temporary = pd.DataFrame(data = {'date':pd.date_range(filtered.iloc[x]['startdate'], filtered.iloc[x]['enddate'], freq='D') + pd.Timedelta(days=1)})
vacations_date_only = vacations_date_only.append(df_temporary)
    vacations_date_only['date'] = vacations_date_only['date'].apply(lambda x: x.date())
vacations_date_only['date'] = vacations_date_only['date'].astype('datetime64[ns]')
# Since the data from Rijksoverheid starts from school year 2019-2020, add the rest of 2019 vacations manually!
kerst_18 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 1, 1), periods = 6, freq='1d')})
voorjaar_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 2, 16), periods = 9, freq='1d')})
mei_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 4, 27), periods = 9, freq='1d')})
zomer_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 7, 13), periods = 7*6 + 2, freq='1d')})
vacations_date_only = vacations_date_only.append([kerst_18, voorjaar_19, mei_19, zomer_19])
return vacations_date_only
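# Illustrative sketch, not part of the original notebook: flag vacation days on a
# dataframe that has a naive 'datetime' column (an assumed input), using the lookup
# table built by get_vacations() above.
def example_flag_vacations(df):
    vacations = get_vacations()
    df['vacation'] = df['datetime'].dt.normalize().isin(vacations['date']).astype(int)
    return df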
def get_events():
"""
Event data from static file. We can store events in the database in the near future. When possible, we can get it from an API.
"""
events = pd.read_excel('events_zuidoost.xlsx', sheet_name='Resultaat', header=1)
# Clean
events.dropna(how='all', inplace=True)
events.drop(events.loc[events['Datum']=='Niet bijzonder evenementen zijn hierboven niet meegenomen.'].index, inplace=True)
events.drop(events.loc[events['Locatie'].isna()].index, inplace=True)
events.drop(events.loc[events['Locatie']=='Overig'].index, inplace=True)
events['Datum'] = events['Datum'].astype('datetime64[ns]')
# Fix location names
events['Locatie'] = events['Locatie'].apply(lambda x: x.strip()) # Remove spaces
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo dome', 'Ziggo Dome', events['Locatie'])
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo Dome (2x)', 'Ziggo Dome', events['Locatie'])
# Get events from 2019 from static file
events = events[events['Datum'].dt.year>=2019].copy()
events.reset_index(inplace=True)
events.drop(columns=['index'], inplace=True)
# Add 2020-present events manually
events = events.append({'Datum':datetime(2020, 1, 19)}, ignore_index=True) # Ajax - Sparta
events = events.append({'Datum':datetime(2020, 2, 2)}, ignore_index=True) # Ajax - PSV
events = events.append({'Datum':datetime(2020, 2, 16)}, ignore_index=True) # Ajax - RKC
events = events.append({'Datum':datetime(2020, 1, 3)}, ignore_index=True) # Ajax - AZ
# Euro 2021
events = events.append({'Datum':datetime(2021, 6, 13)}, ignore_index=True) # EURO 2020 Nederland- Oekraïne
events = events.append({'Datum':datetime(2021, 6, 17)}, ignore_index=True) # EURO 2020 Nederland- Oostenrijk
events = events.append({'Datum':datetime(2021, 6, 21)}, ignore_index=True) # EURO 2020 Noord-Macedonië - Nederland
events = events.append({'Datum':datetime(2021, 6, 26)}, ignore_index=True) # EURO 2020 Wales - Denemarken
return events
def merge_csv_json(bestemming_csv, herkomst_csv, bestemming_json, herkomst_json):
bestemming = pd.concat([bestemming_csv, bestemming_json]).copy()
herkomst = pd.concat([herkomst_csv, herkomst_json]).copy()
return [bestemming, herkomst]
def merge_bestemming_herkomst(bestemming, herkomst):
bestemming.rename(columns={'AantalReizen':'Uitchecks',
'UurgroepOmschrijving (van aankomst)':'UurgroepOmschrijving',
'AankomstHalteCode':'HalteCode',
'AankomstHalteNaam':'HalteNaam'}, inplace=True)
herkomst.rename(columns={'AantalReizen':'Inchecks',
'UurgroepOmschrijving (van vertrek)':'UurgroepOmschrijving',
'VertrekHalteCode':'HalteCode',
'VertrekHalteNaam':'HalteNaam'}, inplace=True)
merged = pd.merge(left=bestemming, right=herkomst,
left_on=['Datum', 'UurgroepOmschrijving', 'HalteNaam'],
right_on=['Datum', 'UurgroepOmschrijving', 'HalteNaam'],
how='outer')
return merged
def preprocess_gvb_data_for_modelling(gvb_df, station):
df = gvb_df[gvb_df['HalteNaam']==station].copy()
# create datetime column
df['datetime'] = df['Datum'].astype('datetime64[ns]')
df['UurgroepOmschrijving'] = df['UurgroepOmschrijving'].astype(str)
df['hour'] = df['UurgroepOmschrijving'].apply(lambda x: int(x[:2]))
# add time indications
df['week'] = df['datetime'].dt.isocalendar().week
df['month'] = df['datetime'].dt.month
df['year'] = df['datetime'].dt.year
df['weekday'] = df['datetime'].dt.weekday
hours = pd.get_dummies(df['hour'], prefix='hour')
days = pd.get_dummies(df['weekday'], prefix='weekday')
df = pd.concat([df, hours, days], axis=1)
# drop duplicates and sort
df_ok = df.drop_duplicates()
# sort values and reset index
df_ok = df_ok.sort_values(by = 'datetime')
df_ok = df_ok.reset_index(drop = True)
# drop unnecessary columns
df_ok.drop(columns=['Datum', 'UurgroepOmschrijving', 'HalteNaam'], inplace=True)
# rename columns
df_ok.rename(columns={'Inchecks':'check-ins', 'Uitchecks':'check-outs'}, inplace=True)
return df_ok
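# Illustrative sketch (added for clarity, not in the original pipeline): the merge and
# preprocessing steps above are meant to be chained per station; the station name here
# is a placeholder, not a value taken from the project.
def example_prepare_station_data(bestemming, herkomst, station="Centraal Station"):
    merged = merge_bestemming_herkomst(bestemming, herkomst)
    return preprocess_gvb_data_for_modelling(merged, station)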
def preprocess_knmi_data_hour(df_raw):
"""
    Prepare the raw hourly KNMI data for modelling.
    We rename the coded columns, build a proper timestamp, and rescale the
    variables that KNMI reports in 0.1 units (temperature, wind speed, etc.).
Documentation: https://www.daggegevens.knmi.nl/klimatologie/uurgegevens
"""
# drop duplicates
df_raw = df_raw.drop_duplicates()
# rename columns
df = df_raw.rename(columns={"DD": "wind_direction", "FH": "wind_speed_h", "FF": "wind_speed", "FX": "wind_gust",
"T": "temperature", "T10N": "temperature_min", "TD": "dew_point_temperature",
"SQ": "radiation_duration", "Q": "global_radiation",
"DR": "precipitation_duration", "RH": "precipitation_h",
"P": "pressure", "VV": "sight", "N": "cloud_cover", "U": "relative_humidity",
"WW": "weather_code", "IX": "weather_index",
"M": "fog", "R": "rain", "S": "snow", "O": "thunder", "Y": "ice"
})
# get proper datetime column
df["datetime"] = pd.to_datetime(df['date'], format='%Y%m%dT%H:%M:%S.%f') + pd.to_timedelta(df["hour"] - 1, unit = 'hours')
df["datetime"] = df["datetime"].dt.tz_convert("Europe/Amsterdam")
df = df.sort_values(by = "datetime", ascending = True)
df = df.reset_index(drop = True)
df['date'] = df['datetime'].dt.date
df['date'] = df['date'].astype('datetime64[ns]')
df['hour'] -= 1
# drop unwanted columns
df = df.drop(['datetime', 'weather_code', 'station_code'], axis = 'columns')
df = df.astype({'wind_speed':'float64', 'wind_gust':'float64','temperature':'float64','temperature_min':'float64',
'dew_point_temperature':'float64','radiation_duration':'float64','precipitation_duration':'float64',
'precipitation_h':'float64','pressure':'float64'})
# divide some columns by ten (because using 0.1 degrees C etc. as units)
col10 = ["wind_speed", "wind_gust", "temperature", "temperature_min", "dew_point_temperature",
"radiation_duration", "precipitation_duration", "precipitation_h", "pressure"]
df[col10] = df[col10] / 10
return df
def preprocess_metpre_data(df_raw):
"""
To be filled
Documentation: https://www.meteoserver.nl/weersverwachting-API.php
"""
# rename columns
df = df_raw.rename(columns={"windr": "wind_direction", "rv": "relative_humidity", "luchtd": "pressure",
"temp": "temperature", "windb": "wind_force", "winds": "wind_speed",
"gust": "wind_gust", "vis": "sight_m", "neersl": "precipitation_h",
"gr": "global_radiation", "tw": "clouds"
})
# drop duplicates
df = df.drop_duplicates()
# get proper datetime column
df["datetime"] = pd.to_datetime(df['tijd'], unit='s', utc = True)
    df["datetime"] = df["datetime"] + pd.to_timedelta(1, unit = 'hours')  # lines up better this way, but why?
df = df.sort_values(by = "datetime", ascending = True)
df = df.reset_index(drop = True)
df["datetime"] = df["datetime"].dt.tz_convert("Europe/Amsterdam")
# new column: forecast created on
df["offset_h"] = df["offset"].astype(float)
#df["datetime_predicted"] = df["datetime"] - pd.to_timedelta(df["offset_h"], unit = 'hours')
# select only data after starting datetime
#df = df[df['datetime'] >= start_ds] # @me: move this to query later
    # select latest prediction  # makes sense for the prediction set, less so for the training set
df = df.sort_values(by = ['datetime', 'offset_h'])
df = df.drop_duplicates(subset = 'datetime', keep = 'first')
# drop unwanted columns
df = df.drop(['tijd', 'tijd_nl', 'loc',
'icoon', 'samenv', 'ico',
'cape', 'cond', 'luchtdmmhg', 'luchtdinhg',
'windkmh', 'windknp', 'windrltr', 'wind_force',
                  'gustb', 'gustkt', 'gustkmh', 'wind_gust',  # these are not in the data before 14 June
'hw', 'mw', 'lw',
'offset', 'offset_h',
'gr_w'], axis = 'columns', errors = 'ignore')
# set datatypes of weather data to float
df = df.set_index('datetime')
df = df.astype('float64').reset_index()
# cloud cover similar to observations (0-9) & sight, but not really the same thing
df['cloud_cover'] = df['clouds'] / 12.5
df['sight'] = df['sight_m'] / 333
    df = df.drop(['clouds', 'sight_m'], axis = 'columns')
    # resample to an hourly grid, forward-filling gaps of at most 11 hours
    df_hour = df.set_index('datetime').resample('1h').ffill(limit = 11)
    # maybe smooth later? does not seem to help the predictions for now
#df_smooth = df_15.apply(lambda x: savgol_filter(x,17,2))
#df_smooth = df_smooth.reset_index()
df_hour = df_hour.reset_index()
df_hour['date'] = df_hour['datetime'].dt.date
df_hour['date'] = df_hour['date'].astype('datetime64[ns]')
df_hour['hour'] = df_hour['datetime'].dt.hour
return df_hour # df_smooth
def preprocess_covid_data(df_raw):
# Put data to dataframe
df_raw_unpack = df_raw.T['NLD'].dropna()
df = pd.DataFrame.from_records(df_raw_unpack) # Add datetime column
df['datetime'] = pd.to_datetime(df['date_value']) # Select columns
df_sel = df[['datetime', 'stringency']] # extend dataframe to 14 days in future (based on latest value)
dates_future = pd.date_range(df['datetime'].iloc[-1], periods = 14, freq='1d')
df_future = pd.DataFrame(data = {'datetime': dates_future,
'stringency': df['stringency'].iloc[-1]}) # Add together and set index
df_final = df_sel.append(df_future.iloc[1:])
df_final = df_final.set_index('datetime')
return df_final
def preprocess_holiday_data(holidays):
df = pd.DataFrame(holidays, columns=['Date', 'Holiday'])
df['Date'] = df['Date'].astype('datetime64[ns]')
return df
def interpolate_missing_values(data_to_interpolate):
df = data_to_interpolate.copy()
    random_state_value = 1  # Ensure reproducibility
# Train check-ins interpolator
checkins_interpolator_cols = ['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-outs']
checkins_interpolator_targets = ['check-ins']
X_train = df.dropna()[checkins_interpolator_cols]
y_train = df.dropna()[checkins_interpolator_targets]
checkins_interpolator = RandomForestRegressor(random_state=random_state_value)
checkins_interpolator.fit(X_train, y_train)
# Train check-outs interpolator
checkouts_interpolator_cols = ['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-ins']
checkouts_interpolator_targets = ['check-outs']
X_train = df.dropna()[checkouts_interpolator_cols]
y_train = df.dropna()[checkouts_interpolator_targets]
checkouts_interpolator = RandomForestRegressor(random_state=random_state_value)
checkouts_interpolator.fit(X_train, y_train)
    # Select rows that can be interpolated (drop rows where both counts are missing)
df_to_interpolate = df.drop(df.loc[(df['check-ins'].isna()==True) & (df['check-outs'].isna()==True)].index)
# Interpolate check-ins
checkins_missing = df_to_interpolate[(df_to_interpolate['check-outs'].isna()==False) & (df_to_interpolate['check-ins'].isna()==True)].copy()
checkins_missing['stringency'] = checkins_missing['stringency'].replace(np.nan, 0)
checkins_missing['check-ins'] = checkins_interpolator.predict(checkins_missing[['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-outs']])
# Interpolate check-outs
checkouts_missing = df_to_interpolate[(df_to_interpolate['check-ins'].isna()==False) & (df_to_interpolate['check-outs'].isna()==True)].copy()
checkouts_missing['stringency'] = checkouts_missing['stringency'].replace(np.nan, 0)
checkouts_missing['check-outs'] = checkouts_interpolator.predict(checkouts_missing[['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-ins']])
# Insert interpolated values into main dataframe
for index, row in checkins_missing.iterrows():
df.loc[df.index==index, 'check-ins'] = row['check-ins']
for index, row in checkouts_missing.iterrows():
df.loc[df.index==index, 'check-outs'] = row['check-outs']
return df
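# Usage sketch (illustration only): interpolate_missing_values assumes `df` already
# carries the feature columns used above (hour, year, weekday, month, stringency,
# holiday) next to the check-ins/check-outs targets.
def example_fill_missing_counts(df):
    filled = interpolate_missing_values(df)
    # rows where only one of the two counts was missing are now filled in
    return filled[['check-ins', 'check-outs']].isna().sum()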
def get_crowd_last_week(df, row):
week_ago = row['datetime'] - timedelta(weeks=1)
subset_with_hour = df[(df['datetime']==week_ago) & (df['hour']==row['hour'])]
# If crowd from last week is not available at exact date- and hour combination, then get average crowd of last week.
subset_week_ago = df[(df['year']==row['year']) & (df['week']==row['week']) & (df['hour']==row['hour'])]
checkins_week_ago = 0
checkouts_week_ago = 0
if len(subset_with_hour) > 0: # return crowd from week ago at the same day/time (hour)
checkins_week_ago = subset_with_hour['check-ins'].mean()
checkouts_week_ago = subset_with_hour['check-outs'].mean()
elif len(subset_week_ago) > 0: # return average crowd the hour group a week ago
checkins_week_ago = subset_week_ago['check-ins'].mean()
checkouts_week_ago = subset_week_ago['check-outs'].mean()
return [checkins_week_ago, checkouts_week_ago]
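# Illustrative sketch (not from the original code): the helper above is intended to be
# applied row-wise to add 1-week-lag features; the lag column names are placeholders
# and `df` is assumed to have the datetime/hour/year/week columns built earlier.
def example_add_last_week_features(df):
    lagged = df.apply(lambda row: get_crowd_last_week(df, row), axis=1,
                      result_type='expand')
    lagged.columns = ['check-ins_week_ago', 'check-outs_week_ago']
    return df.join(lagged)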
def get_train_test_split(df):
"""
Create train and test split for 1-week ahead models. This means that the last week of the data will be used
as a test set and the rest will be the training set.
"""
most_recent_date = df['datetime'].max()
last_week = pd.date_range(df.datetime.max()-pd.Timedelta(7, unit='D')+pd.DateOffset(1), df['datetime'].max())
train = df[df['datetime']<last_week.min()]
test = df[(df['datetime']>=last_week.min()) & (df['datetime']<=last_week.max())]
return [train, test]
def get_train_val_test_split(df):
"""
Create train, validation, and test split for 1-week ahead models. This means that the last week of the data will be used
as a test set, the second-last will be the validation set, and the rest will be the training set.
"""
most_recent_date = df['datetime'].max()
last_week = pd.date_range(df.datetime.max()-pd.Timedelta(7, unit='D')+pd.DateOffset(1), df['datetime'].max())
two_weeks_before = pd.date_range(last_week.min()-pd.Timedelta(7, unit='D'), last_week.min()-pd.DateOffset(1))
train = df[df['datetime']<two_weeks_before.min()]
validation = df[(df['datetime']>=two_weeks_before.min()) & (df['datetime']<=two_weeks_before.max())]
test = df[(df['datetime']>=last_week.min()) & (df['datetime']<=last_week.max())]
return [train, validation, test]
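# Usage sketch (illustration only): both split helpers above expect the merged
# modelling dataframe with a 'datetime' column and peel off the most recent week(s).
def example_splits(df):
    train, validation, test = get_train_val_test_split(df)
    return {'train': len(train), 'validation': len(validation), 'test': len(test)}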
def get_future_df(features, gvb_data, covid_stringency, holidays, vacations, weather, events):
"""
    Create empty data frame for predictions of the target variable for the specified prediction period
"""
this_year = date.today().isocalendar()[0]
this_week = date.today().isocalendar()[1]
    firstdayofweek = datetime.strptime(f'{this_year}-W{int(this_week)}-1', "%Y-W%W-%w").date()
prediction_date_range = pd.date_range(firstdayofweek, periods=8, freq='D')
prediction_date_range_hour = pd.date_range(prediction_date_range.min(), prediction_date_range.max(), freq='h').delete(-1)
# Create variables
df = | pd.DataFrame({'datetime':prediction_date_range_hour}) | pandas.DataFrame |
""" Junk code from developing the method which might come in handy later.
"""
################################################################################
# Old version of run from analysis.py #
################################################################################
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from numba.typed import List
from anndata import AnnData
from sklearn.cluster import AgglomerativeClustering
from .base import calc_neighbours, get_lrs_scores, calc_distance
from .base_grouping import get_hotspots
from .het import count, count_interactions, get_interactions
def run(adata: AnnData, lrs: np.array,
use_label: str = None, use_het: str = 'cci_het',
distance: int = 0, n_pairs: int = 1000, neg_binom: bool = False,
adj_method: str = 'fdr_bh', pval_adj_cutoff: float = 0.05,
lr_mid_dist: int = 150, min_spots: int = 5, min_expr: float = 0,
verbose: bool = True, stats_method=False, quantile=0.05,
plot_diagnostics: bool = False, show_plot=False,
):
"""Wrapper function for performing CCI analysis, varrying the analysis based
on the inputted data / state of the anndata object.
Parameters
----------
adata: AnnData The data object including the cell types to count.
lrs: np.array The LR pairs to score/test for enrichment (in format 'L1_R1')
use_label: str The cell type results to use in counting.
use_het: The storage place for cell heterogeneity results in adata.obsm.
distance: int Distance to determine the neighbours (default is the nearest neighbour), distance=0 means within spot
n_pairs: int Number of random pairs to generate when performing the background distribution.
neg_binom: bool Whether to use neg-binomial distribution to estimate p-values, NOT appropriate with log1p data, alternative is to use background distribution itself (recommend higher number of n_pairs for this).
adj_method: str Parsed to statsmodels.stats.multitest.multipletests for multiple hypothesis testing correction.
lr_mid_dist: int The distance between the mid-points of the average expression of the two genes in an LR pair for it to be group with other pairs via AgglomerativeClustering to generate a common background distribution.
min_spots: int Minimum number of spots with an LR score to be considered for further testing.
min_expr: float Minimum gene expression of either L or R for spot to be considered to have reasonable score.
Returns
-------
adata: AnnData Relevant information stored: adata.uns['het'], adata.uns['lr_summary'], & data.uns['per_lr_results'].
"""
distance = calc_distance(adata, distance)
neighbours = calc_neighbours(adata, distance, verbose=verbose)
adata.uns['spot_neighbours'] = pd.DataFrame([','.join(x.astype(str))
for x in neighbours],
index=adata.obs_names, columns=['neighbour_indices'])
if verbose:
print("Spot neighbour indices stored in adata.uns['spot_neighbours']")
# Conduct with cell heterogeneity info if label_transfer provided #
cell_het = type(use_label) != type(None) and use_label in adata.uns.keys()
if cell_het:
if verbose:
print("Calculating cell hetereogeneity...")
# Calculating cell heterogeneity #
count(adata, distance=distance, use_label=use_label, use_het=use_het)
het_vals = np.array([1] * len(adata)) \
if use_het not in adata.obsm else adata.obsm[use_het]
""" 1. Filter any LRs without stored expression.
"""
# Calculating the lr_scores across spots for the inputted lrs #
lr_scores, lrs = get_lrs_scores(adata, lrs, neighbours, het_vals, min_expr)
lr_bool = (lr_scores>0).sum(axis=0) > min_spots
lrs = lrs[lr_bool]
lr_scores = lr_scores[:, lr_bool]
if verbose:
print("Altogether " + str(len(lrs)) + " valid L-R pairs")
if len(lrs) == 0:
print("Exiting due to lack of valid LR pairs.")
return
if stats_method:
""" Permutation based method.
1. Group LRs with similar mean expression.
2. Calc. common bg distrib. for grouped lrs.
3. Calc. p-values for each lr relative to bg.
"""
perform_perm_testing(adata, lr_scores, n_pairs, lrs, lr_mid_dist,
verbose, neighbours, het_vals, min_expr,
neg_binom, adj_method, pval_adj_cutoff,
)
else:
""" Perform per lr background removal to get hot-spots by choosing dynamic cutoffs.
Inspired by the watershed method:
1. Generate set of candidate cutoffs based on quantiles.
2. Perform DBScan clustering using spatial coordinates at different cutoffs.
3. Choose cutoff as point that maximises number of clusters i.e. peaks.
"""
# TODO need to evaluate this eps param better.
eps = 3*distance if type(distance)!=type(None) and distance!=0 else 100
get_hotspots(adata, lr_scores.transpose(), lrs, eps=eps,
quantile=quantile, verbose=verbose,
plot_diagnostics=plot_diagnostics, show_plot=show_plot)
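# Hedged usage sketch of the wrapper above; the LR list and parameter values are
# arbitrary examples rather than recommendations from the original author.
def example_run_cci(adata, lr_list):
    run(adata, np.array(lr_list), use_label=None, distance=0,
        n_pairs=1000, min_spots=5, verbose=True)
    # per the docstring, results land in adata.uns (e.g. 'lr_summary') when available
    return adata.uns.get('lr_summary')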
def perform_perm_testing(adata: AnnData, lr_scores: np.ndarray,
n_pairs: int, lrs: np.array,
lr_mid_dist: int, verbose: float, neighbours: List,
het_vals: np.array, min_expr: float,
neg_binom: bool, adj_method: str,
pval_adj_cutoff: float,
):
""" Performs the grouped permutation testing when taking the stats approach.
"""
if n_pairs != 0: # Perform permutation testing
# Grouping spots with similar mean expression point #
genes = get_valid_genes(adata, n_pairs)
means_ordered, genes_ordered = get_ordered(adata, genes)
ims = np.array(
[get_median_index(lr_.split('_')[0], lr_.split('_')[1],
means_ordered.values, genes_ordered)
for lr_ in lrs]).reshape(-1, 1)
if len(lrs) > 1: # Multi-LR pair mode, group LRs to generate backgrounds
clusterer = AgglomerativeClustering(n_clusters=None,
distance_threshold=lr_mid_dist,
affinity='manhattan',
linkage='single')
lr_groups = clusterer.fit_predict(ims)
lr_group_set = np.unique(lr_groups)
if verbose:
print(f'{len(lr_group_set)} lr groups with similar expression levels.')
else: #Single LR pair mode, generate background for the LR.
lr_groups = np.array([0])
lr_group_set = lr_groups
res_info = ['lr_scores', 'p_val', 'p_adj', '-log10(p_adj)',
'lr_sig_scores']
n_, n_sigs = np.array([0]*len(lrs)), np.array([0]*len(lrs))
per_lr_results = {}
with tqdm(
total=len(lr_group_set),
desc="Generating background distributions for the LR pair groups..",
bar_format="{l_bar}{bar} [ time left: {remaining} ]",
) as pbar:
for group in lr_group_set:
# Determining common mid-point for each group #
group_bool = lr_groups==group
group_im = int(np.median(ims[group_bool, 0]))
# Calculating the background #
rand_pairs = get_rand_pairs(adata, genes, n_pairs,
lrs=lrs, im=group_im)
background = get_lrs_scores(adata, rand_pairs, neighbours,
het_vals, min_expr,
filter_pairs=False).ravel()
total_bg = len(background)
                background = background[background!=0]  # filter zeros to speed things up
# Getting stats for each lr in group #
group_lr_indices = np.where(group_bool)[0]
for lr_i in group_lr_indices:
lr_ = lrs[lr_i]
lr_results = pd.DataFrame(index=adata.obs_names,
columns=res_info)
scores = lr_scores[:, lr_i]
stats = get_stats(scores, background, total_bg, neg_binom,
adj_method, pval_adj_cutoff=pval_adj_cutoff)
full_stats = [scores]+list(stats)
for vals, colname in zip(full_stats, res_info):
lr_results[colname] = vals
n_[lr_i] = len(np.where(scores>0)[0])
n_sigs[lr_i] = len(np.where(
lr_results['p_adj'].values<pval_adj_cutoff)[0])
if n_sigs[lr_i] > 0:
per_lr_results[lr_] = lr_results
pbar.update(1)
print(f"{len(per_lr_results)} LR pairs with significant interactions.")
lr_summary = pd.DataFrame(index=lrs, columns=['n_spots', 'n_spots_sig'])
lr_summary['n_spots'] = n_
lr_summary['n_spots_sig'] = n_sigs
lr_summary = lr_summary.iloc[np.argsort(-n_sigs)]
else: #Simply store the scores
per_lr_results = {}
lr_summary = | pd.DataFrame(index=lrs, columns=['n_spots']) | pandas.DataFrame |
import sqlite3
import json
import pandas as pd
class MamphiDataFetcher:
mamphi_db = ""
def __init__(self, mamphi_db=mamphi_db):
self.mamphi_db = mamphi_db
def fetch_center(self):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def fetch_consent(self):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Informed_consent"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def fetch_rand_week(self, week):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Random_Woche_{}".format(week)
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def get_center_by_land(self, land):
if land == "Germany":
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren WHERE Land = 'D'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
german_center = json.dumps([dict(ix) for ix in results])
# number_patient = len(list_patient)
return german_center
elif land == "UK":
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren WHERE Land = 'GB'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
uk_center = json.dumps([dict(ix) for ix in results])
# number_patient = len(list_patient)
return uk_center
def update_zentren(self, center_json):
"""
:rtype: object
:param center_json: json string of the value to be inserted in the database
"""
center = json.loads(center_json)
# Compute center Id manually
values = []
if center['Land'] == "D":
german_center = self.get_center_list_country(country="D")
german_center = pd.read_json(german_center)
zentrum_id = german_center['Zentrum_Id'].max() + 1
values.append(zentrum_id)
else:
uk_center = self.get_center_list_country(country="GB")
uk_center = pd.read_json(uk_center)
zentrum_id = uk_center['Zentrum_Id'].max() + 1
values.append(zentrum_id)
for idx in center.values():
values.append(idx)
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "INSERT INTO Zentren VALUES" + str(tuple(values))
try:
cursor.execute(statement)
conn.commit()
print(cursor.rowcount, "record inserted.")
except:
conn.rollback()
conn.close()
def update_consent(self, consent_json):
consent_item = json.loads(consent_json)
values = []
# compute patient id manually
consent_list = self.fetch_consent()
consent_list = pd.read_json(consent_list)
patient_id = consent_list['Patient_Id'].max() + 1
values.append(patient_id)
for idx in consent_item.values():
values.append(idx)
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "INSERT INTO Informed_consent VALUES" + str(tuple(values))
try:
cursor.execute(statement)
conn.commit()
print(cursor.rowcount, "record inserted.")
except:
conn.rollback()
conn.close()
def update_rand_week(self, value, week):
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
        statement = "INSERT INTO Random_Woche_{} ".format(week) + value
try:
cursor.execute(statement)
conn.commit()
print(cursor.rowcount, "record inserted.")
except:
conn.rollback()
conn.close()
def get_center_list_country(self, country):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
        statement = "SELECT * FROM Zentren WHERE Land = 'D'" if country in ("D", "Deutschland") \
else "SELECT * FROM Zentren WHERE Land = 'GB'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
# results_json = json.dumps([dict(ix) for ix in results])
list_patient = json.dumps([dict(ix) for ix in results])
# number_patient = len(list_patient)
return list_patient
def fetch_consent_list(self, consent):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
        if consent == "missing":
            statement = "SELECT * FROM Informed_consent WHERE Einwilligung = 'nan' AND Datum != 'NaT'"
        elif consent == "incomplete":
statement = "SELECT * FROM Informed_consent WHERE Einwilligung = 'nan'"
else:
statement = "SELECT * FROM Informed_consent WHERE Datum > '2019.06.03'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def get_number_of_patient_per_center_by_week(self, week):
results = self.fetch_rand_week(week=week)
data = pd.read_json(results)
number_patient_per_center = data.groupby(['Zentrum'])['Patient_Id'].count()
center = [idx for idx in number_patient_per_center.index]
number_of_patient = [value for value in number_patient_per_center.values]
df = pd.DataFrame({'Zentrum': center, 'Number_Of_Patient': number_of_patient})
weekly_list = df.to_json(orient='records')
return weekly_list
def get_number_patient_per_center_per_country_by_week(self, week):
"""
        :return: List of patients per center for both countries
"""
weekly_list = self.get_number_of_patient_per_center_by_week(week=week)
load_list = json.loads(weekly_list)
list_german = []
list_uk = []
for el in load_list:
if el['Zentrum'] < 200:
list_german.append(el)
else:
list_uk.append(el)
results = {'Germany': list_german, 'UK': list_uk}
return json.dumps(results)
def fetch_center_ids(self):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
query = "SELECT Zentrum_Id FROM Zentren"
response = cursor.execute(query)
results = response.fetchall()
center_ids = [dict(idx) for idx in results]
return json.dumps(center_ids)
def remove_center_by_id(self, center_id):
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "DELETE FROM Zentren WHERE Zentrum_Id = {}".format(center_id)
cursor.execute(statement)
conn.commit()
conn.close()
def remove_consent_by_id(self, patient_id):
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "DELETE FROM Informed_consent WHERE Patient_Id = {}".format(patient_id)
cursor.execute(statement)
conn.commit()
conn.close()
        print("An item has been removed")
def retrieve_centres_with_number_of_patient(self):
week1 = self.get_number_of_patient_per_center_by_week(week=1)
week2 = self.get_number_of_patient_per_center_by_week(week=2)
week1_df = pd.read_json(week1)
week2_df = | pd.read_json(week2) | pandas.read_json |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
    if values is True, use the axis values
    if False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
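# Small illustration (added, not part of the original test module): for a 2-D frame,
# _axify builds a full-axis tuple indexer with the key placed on the requested axis.
def _example_axify():
    df = DataFrame(np.arange(6).reshape(2, 3))
    # -> (slice(None, None, None), [0, 1]): all rows, first two columns
    return _axify(df, [0, 1], 1)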
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
import math
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import DBSCAN, KMeans
from configs import Level, LEVEL_MAP
from db.QueryBuilder import get_level_refactorings
from refactoring_statistics.query_utils import retrieve_columns
import pandas as pd
from pathlib import Path
from os import path
import numpy as np
REFACTORING_SAMPLES = 50000
STABLE_SAMPLES = 50000
REFACTORING_LEVELS = [Level.Class, Level.Method, Level.Variable, Level.Field, Level.Other]
STABLE_LEVELS = [Level.Class, Level.Method, Level.Variable, Level.Field]
STABLE_Ks = [15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]
IMG_FORMAT = "svg"
DATASET = ""
CLASS_METRICS_Fields = ["classCbo",
"classLcom",
"classLCC",
"classTCC",
"classRfc",
"classWmc"]
CLASS_ATTRIBUTES_QTY_Fields = ["classLoc", "classUniqueWordsQty", "classNumberOfMethods", "classStringLiteralsQty",
"classNumberOfPublicFields", "classVariablesQty"]
PROCESS_METRICS_FIELDS = ["qtyOfCommits",
"bugFixCount",
"refactoringsInvolved"]
OWNERSHIP_METRICS_FIELDS = ["authorOwnership",
"qtyMajorAuthors",
"qtyMinorAuthors",
"qtyOfAuthors"]
# plot all refactoring for each level
def retrieve_aggr_refactorings(dataset, metrics, metrics_description: str, aggregate_function: str = "median"):
Path(path.dirname(SAVE_DIR_METRICS)).mkdir(parents=True, exist_ok=True)
data_path = f"{SAVE_DIR_METRICS}refactoring_{metrics_description}_{aggregate_function}.csv"
if not path.exists(data_path):
# refactoring metrics per level
combined_refactoring_metrics_agg = pd.DataFrame(columns=["refactoring", "level"] + metrics)
for level in REFACTORING_LEVELS:
for refactoring_name in LEVEL_MAP[level]:
refactoring_metrics = retrieve_columns(get_level_refactorings(int(level), refactoring_name, dataset),
metrics, REFACTORING_SAMPLES)
refactoring_metrics_agg = pd.DataFrame(columns=["refactoring", "level"] + metrics)
meta_data = [refactoring_name, int(level)]
if aggregate_function == "mean":
refactoring_metrics_agg.loc[0] = meta_data + refactoring_metrics[metrics].mean(axis=0).tolist()
elif aggregate_function == "median":
refactoring_metrics_agg.loc[0] = meta_data + refactoring_metrics[metrics].median(axis=0).tolist()
else:
raise ValueError(f"{aggregate_function} is not supported.")
combined_refactoring_metrics_agg = combined_refactoring_metrics_agg.append(refactoring_metrics_agg)
combined_refactoring_metrics_agg.to_csv(data_path)
print(
f"Finished computation of the {aggregate_function} of the {metrics_description} metrics for all refactoring levels and stored it at {data_path}.")
return combined_refactoring_metrics_agg
else:
return pd.read_csv(data_path)
def dbscan_cluster(data, eps):
    """Cluster the metric vectors with DBSCAN for the given neighborhood radius eps."""
    return DBSCAN(eps=eps, min_samples=5).fit(data)
def kmeans_cluster(data, k):
    """Cluster the metric vectors into k groups with KMeans."""
    return KMeans(n_clusters=k).fit(data)
def cluster(data, metrics, metrics_description: str, aggregate_function: str = "median", clustering: str = "DBScan",
clustering_description: str = "DBScan",
eps: int = 18, k: int = 3):
Path(path.dirname(f"{SAVE_DIR_RESULTS}Results/")).mkdir(parents=True, exist_ok=True)
result_path = f"{SAVE_DIR_RESULTS}Results/refactoring_{metrics_description}_{aggregate_function}_{clustering}_{clustering_description}.csv"
if not path.exists(result_path):
if clustering == "DBScan":
model = dbscan_cluster(data[metrics].to_numpy(), eps=eps)
elif clustering == "KMeans":
model = kmeans_cluster(data[metrics].to_numpy(), k=k)
else:
raise ValueError(f"{clustering} is not supported.")
data = {"refactoring": data["refactoring"].to_list(),
"level": data["level"].to_list(),
"cluster": list(model.labels_)}
results = | pd.DataFrame(data=data) | pandas.DataFrame |
import numpy as np
import pandas as pd
import hydrostats.data as hd
import hydrostats.visual as hv
import HydroErr as he
import matplotlib.pyplot as plt
import os
from netCDF4 import Dataset
# *****************************************************************************************************
# *************ERA Interim*****************ERA Interim*****************ERA Interim*********************
# *****************************************************************************************************
# User Input Information:
location = 'india-1800-deltaT-ERAi' # Match output folder name from RAPIDpy
comid_list = [55596, 58238, 58317, 58384, 59818, 59909] # Comid's for which csv files are desired
dir = '/Users/chrisedwards/Documents/era5_test/India-DeltaT/outputNetCDF'
csv_dir = '/Users/chrisedwards/Documents/era5_test/India-DeltaT/timeSeries'
qout_file = 'Qout_1800dT_erai_t511_24hr_19800101to20141231.nc'
# Call the NetCDF file.
file = os.path.join(dir, location, qout_file)
nc = Dataset(file)
nc.variables.keys()
nc.dimensions.keys()
# Define variables from the NetCDF file.
riv = nc.variables['rivid'][:].tolist()
lat = nc.variables['lat'][:]
lon = nc.variables['lon'][:]
time = nc.variables['time'][:].tolist()
# Q_error = nc.variables['Qout_error'][:]
Q = nc.variables['Qout'][:]
# Convert time from 'seconds since 1970' to the actual date.
dates = pd.to_datetime(time, unit='s', origin='unix')
temp_dictionary_erai = {}
streamflow_dict_erai = {}
list_streams_erai = []
counter = 0
for n in riv:
name = 'erai-{}-{}'.format(location, n)
if Q.shape[0] > Q.shape[1]:
temp_dictionary_erai['{}'.format(name)] = | pd.DataFrame(data=Q[:, counter], index=dates, columns=['flowrate (cms)']) | pandas.DataFrame |
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes the constant values used
throughout PUDL to populate static lists within the data packages and to
support data cleaning.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types are left as 'XXX', which will deliberately cause an error if used.
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilites for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the glue tables linking EIA and FERC plant and utility
entities.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of partitions (e.g. years) we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) of names of
partition type (sub-key) and paritions (sub-value) containing the paritions
such as tuples of years for each data source that are able to be ingested
into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values)
containing integer - type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
'backup', # WHERE Is this used? because removed from DG table b/c not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'storage_capacity_mw': float,
'storage_customers': pd.Int64Dtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
'summer_capacity_estimate': pd.BooleanDtype(),
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'summer_peak_demand_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': | pd.StringDtype() | pandas.StringDtype |
import string
from flask import Blueprint
from root.modules.consolidations.dao.consolidation_dao_impl import getFileDetails, getFileList, getProjectDetails, saveConsolidation
import os
from flask import Flask, render_template, url_for, json
import re
import pandas as pd
from flask import request, jsonify
import numpy as np
mod = Blueprint('consolidations', __name__)
@mod.route('/getConsolidationFile', methods=['GET', 'POST'])
def consolidationList():
try:
if request.method == "POST":
srcGen_ac_token = request.get_json()['srcGen_ac_token']
user_id = request.get_json()['user_id']
if user_id == '' and srcGen_ac_token == '':
return jsonify({'message': 'Mandatory field validation failed!'})
else:
getData = getFileDetails(srcGen_ac_token, user_id)
#fileName = getData['FILE_PATH'] + getData['FILE_NAME']
json_url = os.path.join(os.path.abspath(getData['FILE_PATH']), "", getData['FILE_NAME'])
data = json.load(open(json_url))
return jsonify({'FILE_CONTENT':data,'FILE_NAME':getData['FILE_NAME'],'FILE_ID':getData['FILE_ID']})
except Exception as e:
return jsonify({'error': str(e)})
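# Illustrative client call for the route above. The base URL and token values
# are placeholders, not part of this module.
def _example_get_consolidation_file(base_url='http://localhost:5000'):
    import requests
    payload = {'srcGen_ac_token': '<file token>', 'user_id': '<user id>'}
    return requests.post(f'{base_url}/getConsolidationFile', json=payload).json()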
@mod.route('/fileList', methods=['GET', 'POST'])
def fileList():
try:
if request.method == "POST":
project_ac_token = request.get_json()['project_ac_token']
user_id = request.get_json()['user_id']
project_details = getProjectDetails(project_ac_token, user_id)
project_id = project_details.PKEY_PROJECTS
            if project_id != '' and user_id != '':
projectFileList = getFileList(project_id, user_id)
return jsonify(projectFileList)
except Exception as e:
return jsonify({'error': str(e)})
@mod.route('/addMoreFiles', methods=['GET', 'POST'])
def addMoreFiles():
try:
if request.method == "POST":
project_ac_token = request.get_json()['project_ac_token']
user_id = request.get_json()['user_id']
files = request.get_json()['files']
#print(files)
con_attribute = list()
rowarray_list = []
resultarray_list = []
for value in files:
getData = getFileDetails(value, user_id)
json_url = os.path.join(os.path.abspath(getData['FILE_PATH']), "", getData['FILE_NAME'])
foo = open(json_url)
data = json.load(foo)
foo.close()
#dataKeys = list()
for i in data[0].keys():
#dataKeys.append(i)
                    # collect the union of attributes across all selected files
                    if i not in con_attribute:
                        con_attribute.append(i)
#print(dataKeys)
#t = {'FILE_AC_TOKEN':value,'KEYS':dataKeys,'NO_KEYS':len(dataKeys),'FILE_NAME':getData['FILE_NAME']}
#rowarray_list.append(t)
#t= {'CON_ATTR':con_attribute,'NO_CON_ATTR':len(con_attribute), 'DATA':rowarray_list}
#resultarray_list.append(t)
for value in files:
getData = getFileDetails(value, user_id)
json_url = os.path.join(os.path.abspath(getData['FILE_PATH']), "", getData['FILE_NAME'])
foo = open(json_url)
data = json.load(foo)
foo.close()
dataKeys = list()
data_list = list()
for i in data[0].keys():
dataKeys.append(i)
for conval in con_attribute:
if conval in dataKeys:
data_list.append(conval)
else:
data_list.append("")
#print(dataKeys)
t = {'FILE_AC_TOKEN':value,'KEYS':data_list,'NO_KEYS':len(data_list),'FILE_NAME':getData['FILE_NAME']}
rowarray_list.append(t)
t= {'CON_ATTR':con_attribute,'NO_CON_ATTR':len(con_attribute), 'DATA':rowarray_list}
resultarray_list.append(t)
return jsonify(resultarray_list)
except Exception as e:
return jsonify({'error': str(e)})
@mod.route('/viewPreview', methods=['GET', 'POST'])
def viewPreview():
try:
if request.method == "POST":
project_ac_token = request.get_json()['project_ac_token']
user_id = request.get_json()['user_id']
actual_file = request.get_json()['actual_file']
files = request.get_json()['files']
#con_attributes = request.get_json()['con_attributes']
getData = getFileDetails(actual_file, user_id)
json_url = os.path.join(os.path.abspath(getData['FILE_PATH']), "", getData['FILE_NAME'])
foo = open(json_url)
file1 = json.load(foo)
foo.close()
column1 = list()
result_data = list();
### get columns from 1st Array
for i in file1[0].keys():
column1.append(i)
for value in files:
if actual_file!=value:
column2 = list()
common_col = list()
### Read 2nd File
getData2 = getFileDetails(value, user_id)
json_url2 = os.path.join(os.path.abspath(getData2['FILE_PATH']), "", getData2['FILE_NAME'])
foo2 = open(json_url2)
file2 = json.load(foo2)
foo2.close()
### get columns from 2nd Array
for i in file2[0].keys():
column2.append(i)
new_array = np.intersect1d(column1, column2)
for i in new_array:
common_col.append(i)
# tuple_common_col = tuple(common_col)
### Merge Both array
A = pd.DataFrame(file1)
B = pd.DataFrame(file2)
# frames = [A, B]
# merge_data = pd.concat(frames,str(common_col))
print(common_col)
merge_data = | pd.DataFrame.merge(A, B, how='inner', on=common_col) | pandas.DataFrame.merge |
import os
import cv2
import numpy as np
import pandas as pd
import dataset_settings
from util import insert_into_df, write_info, process_bimcv_image, resize_image
def prepare_bimcv_plus_data(data_path, v1_csv_path, v2_csv_path, source_url):
v1_csv = pd.read_excel(v1_csv_path, engine='openpyxl')
v2_csv = pd.read_excel(v2_csv_path, engine='openpyxl')
final_csv = | pd.concat([v1_csv, v2_csv]) | pandas.concat |
import datetime
import numpy as np
import pandas as pd
from six import iteritems
from six.moves import zip
from zipline.utils.numpy_utils import NaTns, NaTD
def next_date_frame(dates, events_by_sid, event_date_field_name):
"""
Make a DataFrame representing the simulated next known date for an event.
Parameters
----------
dates : pd.DatetimeIndex.
The index of the returned DataFrame.
    events_by_sid : dict[int -> pd.DataFrame]
        Dict mapping sids to a DataFrame indexed by the date we learned of
        the event, with a column (named ``event_date_field_name``) giving the
        date the event will occur.
event_date_field_name : str
The name of the date field that marks when the event occurred.
Returns
-------
next_events: pd.DataFrame
A DataFrame where each column is a security from `events_by_sid` where
the values are the dates of the next known event with the knowledge we
had on the date of the index. Entries falling after the last date will
have `NaT` as the result in the output.
See Also
--------
previous_date_frame
"""
cols = {
equity: np.full_like(dates, NaTns) for equity in events_by_sid
}
raw_dates = dates.values
for equity, df in iteritems(events_by_sid):
event_dates = df[event_date_field_name]
data = cols[equity]
if not event_dates.index.is_monotonic_increasing:
event_dates = event_dates.sort_index()
# Iterate over the raw Series values, since we're comparing against
# numpy arrays anyway.
iterkv = zip(event_dates.index.values, event_dates.values)
for knowledge_date, event_date in iterkv:
date_mask = (
(knowledge_date <= raw_dates) &
(raw_dates <= event_date)
)
value_mask = (event_date <= data) | (data == NaTns)
data[date_mask & value_mask] = event_date
return pd.DataFrame(index=dates, data=cols)
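# Minimal usage sketch for next_date_frame; the sid, knowledge dates and event
# dates below are invented purely for illustration.
def _example_next_date_frame():
    dates = pd.date_range('2014-01-01', '2014-01-10')
    events_by_sid = {
        1: pd.DataFrame(
            {'event_date': pd.to_datetime(['2014-01-05', '2014-01-09'])},
            index=pd.to_datetime(['2014-01-02', '2014-01-06']),
        ),
    }
    return next_date_frame(dates, events_by_sid, 'event_date')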
def previous_date_frame(date_index, events_by_sid, event_date_field_name):
"""
    Make a DataFrame representing the simulated previous known date for an event.
Parameters
----------
date_index : DatetimeIndex.
The index of the returned DataFrame.
events_by_sid : dict[int -> pd.DataFrame]
Dict mapping sids to a DataFrame. The index of the DataFrame
represents the date we learned of the event mapping to the event
data.
event_date_field_name : str
The name of the date field that marks when the event occurred.
Returns
-------
previous_events: pd.DataFrame
A DataFrame where each column is a security from `events_by_sid` where
the values are the dates of the previous event that occurred on the
date of the index. Entries falling before the first date will have
`NaT` as the result in the output.
See Also
--------
next_date_frame
"""
sids = list(events_by_sid)
out = np.full((len(date_index), len(sids)), NaTD, dtype='datetime64[ns]')
d_n = date_index[-1].asm8
for col_idx, sid in enumerate(sids):
# events_by_sid[sid] is Series mapping knowledge_date to actual
# event_date. We don't care about the knowledge date for
# computing previous earnings.
values = events_by_sid[sid][event_date_field_name].values
values = values[values <= d_n]
out[date_index.searchsorted(values), col_idx] = values
frame = pd.DataFrame(out, index=date_index, columns=sids)
frame.ffill(inplace=True)
return frame
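# Companion sketch for previous_date_frame, mirroring the example above with
# invented dates.
def _example_previous_date_frame():
    dates = pd.date_range('2014-01-01', '2014-01-10')
    events_by_sid = {
        1: pd.DataFrame(
            {'event_date': pd.to_datetime(['2014-01-05', '2014-01-09'])},
            index=pd.to_datetime(['2014-01-02', '2014-01-06']),
        ),
    }
    return previous_date_frame(dates, events_by_sid, 'event_date')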
def previous_value(date_index, events_by_sid, event_date_field, value_field,
value_field_dtype, missing_value):
"""
    Make a DataFrame representing the simulated previous known value for an event field.
Parameters
----------
date_index : DatetimeIndex.
The index of the returned DataFrame.
    events_by_sid : dict[int -> pd.DataFrame]
        Dict mapping sids to a DataFrame indexed by the date we learned of
        the event, containing the event date (``event_date_field``) and the
        associated value (``value_field``).
    Returns
    -------
    previous_values: pd.DataFrame
        A DataFrame where each column is a security from `events_by_sid` where
        the values are the most recent known values of ``value_field`` for
        events that occurred on or before the date of the index. Entries
        falling before the first event date will have ``missing_value`` as the
        result in the output.
See Also
--------
next_date_frame
"""
sids = list(events_by_sid)
out = np.full(
(len(date_index), len(sids)),
missing_value,
dtype=value_field_dtype
)
d_n = date_index[-1].asm8
for col_idx, sid in enumerate(sids):
# events_by_sid[sid] is DataFrame mapping knowledge_date to event
# date and value. We don't care about the knowledge date for computing
# previous values.
df = events_by_sid[sid]
df = df[df[event_date_field] <= d_n]
out[
date_index.searchsorted(df[event_date_field].values), col_idx
] = df[value_field]
frame = pd.DataFrame(out, index=date_index, columns=sids)
frame.ffill(inplace=True)
return frame
def normalize_data_query_time(dt, time, tz):
"""Apply the correct time and timezone to a date.
Parameters
----------
dt : pd.Timestamp
The original datetime that represents the date.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
Returns
-------
query_dt : pd.Timestamp
The timestamp with the correct time and date in utc.
"""
# merge the correct date with the time in the given timezone then convert
# back to utc
return pd.Timestamp(
datetime.datetime.combine(dt.date(), time),
tz=tz,
).tz_convert('utc')
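# Quick illustration (assumes pytz, which zipline already depends on): an
# 8:45 US/Eastern query time on 2014-01-01 normalizes to 13:45 UTC.
def _example_normalize_data_query_time():
    import pytz
    return normalize_data_query_time(
        pd.Timestamp('2014-01-01'),
        datetime.time(8, 45),
        pytz.timezone('US/Eastern'),
    )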
def normalize_data_query_bounds(lower, upper, time, tz):
"""Adjust the first and last dates in the requested datetime index based on
the provided query time and tz.
lower : pd.Timestamp
The lower date requested.
upper : pd.Timestamp
The upper date requested.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
"""
# Subtract one day to grab things that happened on the first day we are
# requesting. This doesn't need to be a trading day, we are only adding
# a lower bound to limit the amount of in memory filtering that needs
# to happen.
lower -= datetime.timedelta(days=1)
if time is not None:
return normalize_data_query_time(
lower,
time,
tz,
), normalize_data_query_time(
upper,
time,
tz,
)
return lower, upper
def normalize_timestamp_to_query_time(df,
time,
tz,
inplace=False,
ts_field='timestamp'):
"""Update the timestamp field of a dataframe to normalize dates around
some data query time/timezone.
Parameters
----------
df : pd.DataFrame
The dataframe to update. This needs a column named ``ts_field``.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
inplace : bool, optional
Update the dataframe in place.
ts_field : str, optional
The name of the timestamp field in ``df``.
Returns
-------
df : pd.DataFrame
The dataframe with the timestamp field normalized. If ``inplace`` is
true, then this will be the same object as ``df`` otherwise this will
be a copy.
"""
if not inplace:
# don't mutate the dataframe in place
df = df.copy()
dtidx = | pd.DatetimeIndex(df.loc[:, ts_field], tz='utc') | pandas.DatetimeIndex |
import sys
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
def preprocess(df):
    # fill missing ages and fares with the column means
    df.Age = df.Age.fillna(value=df.Age.mean())
    df.Fare = df.Fare.fillna(value=df.Fare.mean())
    # create new class U for unknown embarking locations
    df.Embarked = df.Embarked.fillna(value='U')
    df.Embarked = df.Embarked.replace('C', 'Cherbourg').replace('Q', 'Queenstown').replace('S', 'Southampton')
df.set_index('PassengerId', inplace=True, drop=True)
df.drop('Cabin', axis=1, inplace=True)
df.drop('Ticket', axis=1, inplace=True)
df.drop('Name', axis=1, inplace=True)
return df
class Preprocesser:
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
X = preprocess(X)
return X
def get_train_X_y(path_to_data_folder):
df = pd.read_csv(f'{path_to_data_folder}/train.csv')
df = preprocess(df)
X = df.drop('Survived',axis=1)
y = df.Survived
return X, y
def get_test(path_to_data_folder):
df = pd.read_csv(f'{path_to_data_folder}/test.csv')
return preprocess(df)
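# Example wiring of the helpers above (illustrative; assumes the Kaggle
# Titanic csv files live in a local "data" folder).
def example_train_valid_split(path_to_data_folder='data'):
    X, y = get_train_X_y(path_to_data_folder)
    return train_test_split(X, y, test_size=0.2, random_state=42)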
class CustomScaler:
'''
This is a custom StandardScaler implementation for Pipeline.
'''
def __init__(self, continuous_cols=None):
self.continuous_cols = continuous_cols
self.ss = StandardScaler()
print(f'creating StandardScaler object for {continuous_cols} in X')
pass
def fit(self, X, y):
self.X = X
self.y = y
self.continuous = self.X[self.continuous_cols]
self.ss.fit(self.continuous)
return self
def transform(self, X):
self.scaled_data = self.ss.transform(self.continuous)
self.scaled_data = pd.DataFrame(self.scaled_data, columns=self.continuous_cols)
self.scaled_data.index = self.X.index
self.X.drop(self.continuous_cols, axis=1, inplace=True)
return pd.concat([self.X, self.scaled_data],axis=1, )
class CustomEncoder:
'''
This is a custom OneHotEncoder implementation for Pipeline
'''
def __init__(self, categorical_cols=None):
self.categories = categorical_cols
if categorical_cols:
print(f'creating a OneHotEncoder object for {categorical_cols}')
pass
def fit(self, X, y):
return self
def transform(self, X):
for col in self.categories:
ohe = OneHotEncoder()
feature = np.array(X[col]).reshape(-1,1)
ohe.fit(feature)
encoded = pd.DataFrame(ohe.transform(feature).toarray())
encoded.index = X.index
X = | pd.concat([X,encoded],axis=1) | pandas.concat |
# pip.main(['install', 'nibabel'])
# pip.main(['install', 'pynrrd'])
# pip.main(['install', 'h5py'])
# pip.main(['install', 'scikit-image'])
# pip.main(['install', 'future'])
import os
import sys
import shutil
import difflib
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as pp
import matplotlib.pyplot as plt
import nibabel
import nrrd
import h5py
try:
import pyspark
except:
pass
try:
import pyklb
except:
pass
import time
import tempfile
import multiprocessing
from scipy import io, interpolate, linalg, stats, ndimage, signal, optimize, sparse
from sklearn import cluster, mixture, decomposition, externals
from skimage import morphology, measure, filters
from skimage.external import tifffile
from PIL import Image
import xml
from builtins import range, zip
def detrend_dynamic_baseline(timesers, poly_ordr=2, tau=baseline_tau):
'''estimation of dynamic baseline for input timeseries'''
# poly_ordr polynomial order for detrending
# tau: timescale constant for baseline estimation (in seconds)
# freq_cutoff: highpass cutoff frequency
# freq_stack: frequency of imaging a single stack (in Hz)
# timeseries mean
timesers_mean = timesers.mean()
# length of interval of dynamic baseline time-scales
ltau = (np.round(tau * freq_stack / 2) * 2 + 1).astype(int)
# detrend with a low-order polynomial
xtime = np.arange(timesers.shape[0])
coefpoly = np.polyfit(xtime, timesers, poly_ordr)
timesers -= np.polyval(coefpoly, xtime)
timesers = np.concatenate((timesers[::-1], timesers, timesers[::-1]))
# highpass filter
nyquist = freq_stack / 2
if (freq_cutoff > 1e-10) and (freq_cutoff < nyquist - 1e-10):
f_rng = np.array([freq_cutoff, nyquist - 1e-10])
        krnl = signal.firwin(ltau, f_rng / nyquist, pass_zero=False)
timesers = signal.filtfilt(krnl, 1, timesers, padtype=None)
# restore mean
timesers = timesers - timesers.mean() + timesers_mean
# compute dynamic baseline
timesers_df = | pd.DataFrame(timesers) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import numpy as np
import pandas as pd
from pandapower.std_types import add_basic_std_types, load_std_type
from pandapower.auxiliary import PandapowerNet, get_free_id, _preserve_dtypes
from pandapower.run import reset_results
def create_empty_network(name=None, f_hz=50.):
"""
This function initializes the pandapower datastructure.
OPTIONAL:
**f_hz** (float, 50.) - power system frequency in hertz
**name** (string, None) - name for the network
RETURN:
**net** (attrdict) - PANDAPOWER attrdict with empty tables:
- bus
- ext_grid
- gen
- impedance
- line
- load
- sgen
- shunt
- trafo
- trafo3w
- ward
- xward
EXAMPLE:
net = create_empty_network()
"""
net = PandapowerNet({
# structure data
"bus": [('name', np.dtype(object)),
('vn_kv', 'f8'),
('type', np.dtype(object)),
('zone', np.dtype(object)),
('in_service', 'bool'), ],
"load": [("name", np.dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", np.dtype(object))],
"sgen": [("name", np.dtype(object)),
("bus", "i8"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", np.dtype(object))],
"gen": [("name", np.dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("vm_pu", "f8"),
("sn_kva", "f8"),
("min_q_kvar", "f8"),
("max_q_kvar", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", np.dtype(object))],
"switch": [("bus", "i8"),
("element", "i8"),
("et", np.dtype(object)),
("type", np.dtype(object)),
("closed", "bool"),
("name", np.dtype(object))],
"shunt": [("bus", "u4"),
("name", np.dtype(object)),
("q_kvar", "f8"),
("p_kw", "f8"),
("in_service", "i8")],
"ext_grid": [("name", np.dtype(object)),
("bus", "u4"),
("vm_pu", "f8"),
("va_degree", "f8"),
("in_service", 'bool')],
"line": [("name", np.dtype(object)),
("std_type", np.dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("length_km", "f8"),
("r_ohm_per_km", "f8"),
("x_ohm_per_km", "f8"),
("c_nf_per_km", "f8"),
("imax_ka", "f8"),
("df", "f8"),
("parallel", "u4"),
("type", np.dtype(object)),
("in_service", 'bool')],
"trafo": [("name", np.dtype(object)),
("std_type", np.dtype(object)),
("hv_bus", "u4"),
("lv_bus", "u4"),
("sn_kva", "f8"),
("vn_hv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_percent", "f8"),
("vscr_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_degree", "f8"),
("tp_side", np.dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"trafo3w": [("name", np.dtype(object)),
("std_type", np.dtype(object)),
("hv_bus", "u4"),
("mv_bus", "u4"),
("lv_bus", "u4"),
("sn_hv_kva", "u8"),
("sn_mv_kva", "u8"),
("sn_lv_kva", "u8"),
("vn_hv_kv", "f8"),
("vn_mv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_hv_percent", "f8"),
("vsc_mv_percent", "f8"),
("vsc_lv_percent", "f8"),
("vscr_hv_percent", "f8"),
("vscr_mv_percent", "f8"),
("vscr_lv_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_mv_degree", "f8"),
("shift_lv_degree", "f8"),
("tp_side", np.dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"impedance": [("name", np.dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("r_pu", "f8"),
("x_pu", "f8"),
("sn_kva", "f8"),
("in_service", 'bool')],
"ward": [("name", np.dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("in_service", "f8")],
"xward": [("name", np.dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("r_ohm", "f8"),
("x_ohm", "f8"),
("vm_pu", "f8"),
("in_service", "f8")],
# geodata
"line_geodata": [("coords", np.dtype(object))],
"bus_geodata": [("x", "f8"), ("y", "f8")],
# result tables
"_empty_res_bus": [("vm_pu", "f8"),
("va_degree", "f8"),
("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_ext_grid": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_line": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo3w": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_mv_kw", "f8"),
("q_mv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_mv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_load": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_sgen": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_gen": [("p_kw", "f8"),
("q_kvar", "f8"),
("va_degree", "f8")],
"_empty_res_shunt": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_impedance": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8")],
"_empty_res_ward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_xward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
# internal
"_ppc": None,
"version": 1.0,
"converged": False,
"name": name,
"f_hz": f_hz
})
for s in net:
if isinstance(net[s], list):
net[s] = pd.DataFrame(np.zeros(0, dtype=net[s]))
add_basic_std_types(net)
reset_results(net)
return net
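# --- Usage sketch (editorial addition, not part of the original module) -----
# Builds a deliberately tiny network with the create_* functions defined in
# this module; voltage levels and power values are illustrative assumptions.
# A runnable power flow would additionally need a branch element (e.g. a line)
# connecting the two buses.
def _example_minimal_network():
    """Sketch: an empty net with a slack bus, a load bus and one load."""
    net = create_empty_network(name="example")
    b1 = create_bus(net, vn_kv=20., name="slack bus")
    b2 = create_bus(net, vn_kv=20., name="load bus")
    create_ext_grid(net, bus=b1, vm_pu=1.02)
    create_load(net, bus=b2, p_kw=100., q_kvar=20.)
    return net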
def create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b",
zone=None, in_service=True, max_vm_pu=np.nan,
min_vm_pu=np.nan, **kwargs):
"""
Adds one bus in table net["bus"].
Busses are the nodes of the network that all other elements connect to.
INPUT:
**net** (PandapowerNet) - The pandapower network in which the element is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available
**vn_kv** (float, default 0.4) - The grid voltage level.
**busgeodata** ((x,y)-tuple, default None) - coordinates used for plotting
        **type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
**eid** (int) - The index of the created element
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index and index in net["bus"].index:
raise UserWarning("A bus with index %s already exists" % index)
if index is None:
index = get_free_id(net["bus"])
# store dtypes
dtypes = net.bus.dtypes
net.bus.loc[index, ["name", "vn_kv", "type", "zone", "in_service"]] = \
[name, vn_kv, type, zone, bool(in_service)]
# and preserve dtypes
_preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[index, ["x", "y"]] = geodata
if not np.isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not np.isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
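# --- Usage sketch (editorial addition) ---------------------------------------
# create_bus only adds the optional min_vm_pu / max_vm_pu columns to net.bus
# when the corresponding arguments are passed, so the bus table stays lean by
# default. The voltage band below is an illustrative assumption.
def _example_bus_with_voltage_limits(net):
    """Sketch: a bus that carries an allowed voltage band."""
    return create_bus(net, vn_kv=110., name="constrained bus",
                      min_vm_pu=0.95, max_vm_pu=1.05)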
def create_buses(net, nr_buses, vn_kv=0.4, index=None, name=None, type="b", geodata=None,
zone=None, in_service=True):
"""
Adds several buses in table net["bus"] at once.
Busses are the nodal points of the network that all other elements connect to.
Input:
**net** (PandapowerNet) - The pandapower network in which the element is created
**nr_buses** (int) - The number of buses that is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available
**vn_kv** (float, default 0.4) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
        **type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
        **eid** (int) - The indices of the created elements
EXAMPLE:
        create_buses(net, 2)
"""
if index:
for idx in index:
if idx in net.bus.index:
raise UserWarning("A bus with index %s already exists" % index)
else:
bid = get_free_id(net["bus"])
index = np.arange(bid, bid + nr_buses, 1)
# TODO: not needed when concating anyways?
# store dtypes
# dtypes = net.bus.dtypes
dd = pd.DataFrame(index=index, columns=net.bus.columns)
dd["vn_kv"] = vn_kv
dd["type"] = type
dd["zone"] = zone
dd["in_service"] = in_service
dd["name"] = name
net["bus"] = pd.concat([net["bus"], dd], axis=0).reindex_axis(net["bus"].columns, axis=1)
# and preserve dtypes
# _preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[bid, ["x", "y"]] = geodata
return index
def create_load(net, bus, p_kw, q_kvar=0, sn_kva=np.nan, name=None, scaling=1., index=None,
in_service=True, type=None):
"""
Adds one load in table net["load"].
All loads are modelled in the consumer system, meaning load is positive and generation is
negative active power. Please pay attention to the correct signing of the reactive power as well.
INPUT:
**net** - The net within this load should be created
**bus** (int) - The bus id to which the load is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the load
**q_kvar** (float, default 0) - The reactive power of the load
        - positive value -> load
- negative value -> generation
**sn_kva** (float, default None) - Nominal power of the load
**name** (string, default None) - The name for this load
**scaling** (float, default 1.) - An OPTIONAL scaling factor to be set customly
**type** (string, None) - type variable to classify the load
**index** (int, None) - Force the specified index. If None, the next highest available index
is used
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
**index** (int) - The index of the created element
EXAMPLE:
create_load(net, bus=0, p_kw=10., q_kvar=2.)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["load"])
if index in net["load"].index:
raise UserWarning("A load with the id %s already exists" % id)
# store dtypes
dtypes = net.load.dtypes
net.load.loc[index, ["name", "bus", "p_kw", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, scaling, q_kvar, sn_kva, bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.load, dtypes)
return index
def create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=np.nan, name=None, index=None,
scaling=1., type=None, in_service=True, max_p_kw=np.nan, min_p_kw=np.nan,
max_q_kvar=np.nan, min_q_kvar=np.nan, cost_per_kw=np.nan, cost_per_kvar=np.nan,
controllable=False):
"""
Adds one static generator in table net["sgen"].
Static generators are modelled as negative PQ loads. This element is used to model generators
with a constant active and reactive power feed-in. If you want to model a voltage controlled
generator, use the generator element instead.
All elements in the grid are modelled in the consumer system, including generators!
If you want to model the generation of power, you have to assign a negative active power
to the generator. Please pay attention to the correct signing of the
reactive power as well.
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the static generator is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the static generator (negative for generation!)
**q_kvar** (float, default 0) - The reactive power of the sgen
**sn_kva** (float, default None) - Nominal power of the sgen
**name** (string, default None) - The name for this sgen
**index** (int, None) - Force the specified index. If None, the next highest available index
is used
**scaling** (float, 1.) - An OPTIONAL scaling factor to be set customly
**type** (string, None) - type variable to classify the static generator
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
**index** - The unique id of the created sgen
EXAMPLE:
create_sgen(net, 1, p_kw = -120)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["sgen"])
if index in net["sgen"].index:
raise UserWarning("A static generator with the id %s already exists" % index)
# store dtypes
dtypes = net.sgen.dtypes
net.sgen.loc[index, ["name", "bus", "p_kw", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, scaling, q_kvar, sn_kva, bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.sgen, dtypes)
if not np.isnan(min_p_kw):
if "min_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "min_p_kw"] = pd.Series()
net.sgen.loc[index, "min_p_kw"] = float(min_p_kw)
if not np.isnan(max_p_kw):
if "max_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "max_p_kw"] = pd.Series()
net.sgen.loc[index, "max_p_kw"] = float(max_p_kw)
if not np.isnan(min_q_kvar):
if "min_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "min_q_kvar"] = pd.Series()
net.sgen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not np.isnan(max_q_kvar):
if "max_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "max_q_kvar"] = pd.Series()
net.sgen.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not np.isnan(cost_per_kw):
if "cost_per_kw" not in net.sgen.columns:
net.sgen.loc[:, "cost_per_kw"] = pd.Series()
net.sgen.loc[index, "cost_per_kw"] = float(cost_per_kw)
if not np.isnan(cost_per_kvar):
if "cost_per_kvar" not in net.sgen.columns:
net.sgen.loc[:, "cost_per_kvar"] = pd.Series()
net.sgen.loc[index, "cost_per_kvar"] = float(cost_per_kvar)
if controllable:
if "controllable" not in net.sgen.columns:
net.sgen.loc[:, "controllable"] = pd.Series()
net.sgen.loc[index, "controllable"] = bool(controllable)
return index
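# --- Usage sketch (editorial addition) ---------------------------------------
# Illustrates the optional OPF-related arguments of create_sgen; the limit and
# cost columns are, like the bus voltage limits above, only created on demand.
# All numerical values are assumptions for illustration.
def _example_controllable_sgen(net, bus):
    """Sketch: a dispatchable static generator with power limits and a cost."""
    return create_sgen(net, bus, p_kw=-500., q_kvar=0., sn_kva=600.,
                       min_p_kw=-600., max_p_kw=0.,
                       cost_per_kw=0.05, controllable=True)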
def create_gen(net, bus, p_kw, vm_pu=1., sn_kva=np.nan, name=None, index=None, max_q_kvar=np.nan,
min_q_kvar=np.nan, min_p_kw=np.nan, max_p_kw=np.nan, scaling=1., type=None,
in_service=True, cost_per_kw=np.nan, cost_per_kvar=np.nan, controllable=False):
"""
Adds a generator to the network.
Generators are always modelled as voltage controlled PV nodes, which is why the input parameter
is active power and a voltage set point. If you want to model a generator as PQ load with fixed
reactive power and variable voltage, please use a static generator instead.
INPUT:
**net** - The net within this generator should be created
**bus** (int) - The bus id to which the generator is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the generator (negative for generation!)
        **vm_pu** (float, default 1.0) - The voltage set point of the generator.
**sn_kva** (float, None) - Nominal power of the generator
**name** (string, None) - The name for this generator
**index** (int, None) - Force the specified index. If None, the next highest available index
is used
**scaling** (float, 1.0) - scaling factor which for the active power of the generator
**type** (string, None) - type variable to classify generators
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
**index** - The unique id of the created generator
EXAMPLE:
create_gen(net, 1, p_kw = -120, vm_pu = 1.02)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if bus in net.ext_grid.bus.values:
raise UserWarning(
"There is already an external grid at bus %u, only one voltage controlling element (ext_grid, gen) is allowed per bus." % bus)
if bus in net.gen.bus.values:
raise UserWarning(
"There is already a generator at bus %u, only one voltage controlling element (ext_grid, gen) is allowed per bus." % bus)
if index is None:
index = get_free_id(net["gen"])
if index in net["gen"].index:
raise UserWarning("A generator with the id %s already exists" % index)
# store dtypes
dtypes = net.gen.dtypes
net.gen.loc[index, ["name", "bus", "p_kw", "vm_pu", "sn_kva", "type", "in_service", "scaling"]]\
= [name, bus, p_kw, vm_pu, sn_kva, type, bool(in_service), scaling]
# and preserve dtypes
_preserve_dtypes(net.gen, dtypes)
if not np.isnan(min_p_kw):
if "min_p_kw" not in net.gen.columns:
net.gen.loc[:, "min_p_kw"] = pd.Series()
net.gen.loc[index, "min_p_kw"] = float(min_p_kw)
if not np.isnan(max_p_kw):
if "max_p_kw" not in net.gen.columns:
net.gen.loc[:, "max_p_kw"] = pd.Series()
net.gen.loc[index, "max_p_kw"] = float(max_p_kw)
if not np.isnan(min_q_kvar):
if "min_q_kvar" not in net.gen.columns:
net.gen.loc[:, "min_q_kvar"] = pd.Series()
net.gen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not np.isnan(max_q_kvar):
if "max_q_kvar" not in net.gen.columns:
net.gen.loc[:, "max_q_kvar"] = pd.Series()
net.gen.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not np.isnan(cost_per_kw):
if "cost_per_kw" not in net.gen.columns:
net.gen.loc[:, "cost_per_kw"] = pd.Series()
net.gen.loc[index, "cost_per_kw"] = float(cost_per_kw)
if not np.isnan(cost_per_kvar):
if "cost_per_kvar" not in net.gen.columns:
net.gen.loc[:, "cost_per_kvar"] = pd.Series()
net.gen.loc[index, "cost_per_kvar"] = float(cost_per_kvar)
if controllable:
if "controllable" not in net.gen.columns:
net.gen.loc[:, "controllable"] = pd.Series()
net.gen.loc[index, "controllable"] = bool(controllable)
return index
def create_ext_grid(net, bus, vm_pu=1.0, va_degree=0., name=None, in_service=True,
s_sc_max_mva=np.nan, s_sc_min_mva=np.nan, rx_max=np.nan, rx_min=np.nan,
index=None, cost_per_kw=np.nan, cost_per_kvar=np.nan):
"""
Creates an external grid connection.
External grids represent the higher level power grid connection and are modelled as the slack
bus in the power flow calculation.
INPUT:
**net** - pandapower network
**bus** (int) - bus where the slack is connected
OPTIONAL:
**vm_pu** (float, default 1.0) - voltage at the slack node in per unit
        **va_degree** (float, default 0.) - voltage angle of the external grid in degrees*
**name** (string, default None) - name of of the external grid
**in_service** (boolean) - True for in_service or False for out of service
**Sk_max** - maximal short circuit apparent power **
        **SK_min** - minimal short circuit apparent power **
        **RX_max** - maximal R/X-ratio **
        **RX_min** - minimal R/X-ratio **
\* only considered in loadflow if calculate_voltage_angles = True
\** only needed for short circuit calculations
EXAMPLE:
create_ext_grid(net, 1, voltage = 1.03)
"""
if index and index in net["ext_grid"].index:
raise UserWarning("An external grid with with index %s already exists" % index)
if index is None:
index = get_free_id(net["ext_grid"])
if bus in net.ext_grid.bus.values:
raise UserWarning(
"There is already an external grid at bus %u, only one voltage controlling element (ext_grid, gen) is allowed per bus." % bus)
if bus in net.gen.bus.values:
raise UserWarning(
"There is already a generator at bus %u, only one voltage controlling element (ext_grid, gen) is allowed per bus." % bus)
    # store dtypes
    dtypes = net.ext_grid.dtypes
    net.ext_grid.loc[index, ["bus", "name", "vm_pu", "va_degree", "in_service"]] = \
        [bus, name, vm_pu, va_degree, bool(in_service)]
    # and preserve dtypes
    _preserve_dtypes(net.ext_grid, dtypes)
if not np.isnan(s_sc_max_mva):
if "s_sc_max_mva" not in net.ext_grid.columns:
net.ext_grid.loc[:, "s_sc_max_mva"] = pd.Series()
net.ext_grid.at[:, "s_sc_max_mva"] = float(s_sc_max_mva)
if not np.isnan(s_sc_min_mva):
if "s_sc_min_mva" not in net.ext_grid.columns:
net.ext_grid.loc[:, "s_sc_min_mva"] = pd.Series()
net.ext_grid.at[index, "s_sc_min_mva"] = float(s_sc_min_mva)
if not np.isnan(rx_min):
if "rx_min" not in net.ext_grid.columns:
net.ext_grid.loc[:, "rx_min"] = pd.Series()
net.ext_grid.at[index, "rx_min"] = float(rx_min)
if not np.isnan(rx_max):
if "rx_max" not in net.ext_grid.columns:
net.ext_grid.loc[:, "rx_max"] = pd.Series()
net.ext_grid.at[index, "rx_max"] = float(rx_max)
if not np.isnan(cost_per_kw):
if "cost_per_kw" not in net.ext_grid.columns:
net.ext_grid.loc[:, "cost_per_kw"] = pd.Series()
net.ext_grid.loc[index, "cost_per_kw"] = float(cost_per_kw)
if not np.isnan(cost_per_kvar):
if "cost_per_kvar" not in net.ext_grid.columns:
net.ext_grid.loc[:, "cost_per_kvar"] = | pd.Series() | pandas.Series |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from textwrap import dedent
from parameterized import parameterized
import numpy as np
from numpy import nan
import pandas as pd
from zipline._protocol import handle_non_market_minutes, BarData
from zipline.assets import Asset, Equity
from zipline.errors import (
HistoryInInitialize,
HistoryWindowStartsBeforeData,
)
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.testing import (
create_minute_df_for_asset,
str_to_seconds,
MockDailyBarReader,
)
import zipline.testing.fixtures as zf
OHLC = ['open', 'high', 'low', 'close']
OHLCP = OHLC + ['price']
ALL_FIELDS = OHLCP + ['volume']
class WithHistory(zf.WithCreateBarData, zf.WithDataPortal):
TRADING_START_DT = TRADING_ENV_MIN_DATE = START_DATE = pd.Timestamp(
'2014-01-03',
tz='UTC',
)
TRADING_END_DT = END_DATE = pd.Timestamp('2016-01-29', tz='UTC')
SPLIT_ASSET_SID = 4
DIVIDEND_ASSET_SID = 5
MERGER_ASSET_SID = 6
HALF_DAY_TEST_ASSET_SID = 7
SHORT_ASSET_SID = 8
# asset1:
    # - 2014-01-03 (TRADING_START_DT) to 2016-01-29.
# - every minute/day.
# asset2:
# - 2015-01-05 to 2015-12-31
# - every minute/day.
# asset3:
# - 2015-01-05 to 2015-12-31
# - trades every 10 minutes
# SPLIT_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - splits on 2015-01-05 and 2015-01-06
# DIVIDEND_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - dividends on 2015-01-05 and 2015-01-06
# MERGER_ASSET
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - merger on 2015-01-05 and 2015-01-06
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.trading_days = cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT
)
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.ASSET3 = cls.asset_finder.retrieve_asset(3)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.DIVIDEND_ASSET_SID,
)
cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.MERGER_ASSET_SID,
)
cls.HALF_DAY_TEST_ASSET = cls.asset_finder.retrieve_asset(
cls.HALF_DAY_TEST_ASSET_SID,
)
cls.SHORT_ASSET = cls.asset_finder.retrieve_asset(
cls.SHORT_ASSET_SID,
)
@classmethod
def make_equity_info(cls):
jan_5_2015 = pd.Timestamp('2015-01-05', tz='UTC')
day_after_12312015 = pd.Timestamp('2016-01-04', tz='UTC')
return pd.DataFrame.from_dict(
{
1: {
'start_date': pd.Timestamp('2014-01-03', tz='UTC'),
'end_date': cls.TRADING_END_DT,
'symbol': 'ASSET1',
'exchange': "TEST",
},
2: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET2',
'exchange': "TEST",
},
3: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET3',
'exchange': "TEST",
},
cls.SPLIT_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'SPLIT_ASSET',
'exchange': "TEST",
},
cls.DIVIDEND_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'DIVIDEND_ASSET',
'exchange': "TEST",
},
cls.MERGER_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'MERGER_ASSET',
'exchange': "TEST",
},
cls.HALF_DAY_TEST_ASSET_SID: {
'start_date': pd.Timestamp('2014-07-02', tz='UTC'),
'end_date': day_after_12312015,
'symbol': 'HALF_DAY_TEST_ASSET',
'exchange': "TEST",
},
cls.SHORT_ASSET_SID: {
'start_date': pd.Timestamp('2015-01-05', tz='UTC'),
'end_date': pd.Timestamp('2015-01-06', tz='UTC'),
'symbol': 'SHORT_ASSET',
'exchange': "TEST",
}
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
])
@classmethod
def make_mergers_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.MERGER_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.MERGER_ASSET_SID,
}
])
@classmethod
def make_dividends_data(cls):
return pd.DataFrame([
{
# only care about ex date, the other dates don't matter here
'ex_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'amount': 2.0,
'sid': cls.DIVIDEND_ASSET_SID,
},
{
'ex_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'amount': 4.0,
'sid': cls.DIVIDEND_ASSET_SID,
}],
columns=[
'ex_date',
'record_date',
'declared_date',
'pay_date',
'amount',
'sid'],
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT,
),
)
def verify_regular_dt(self, idx, dt, mode, fields=None, assets=None):
if mode == 'daily':
freq = '1d'
else:
freq = '1m'
cal = self.trading_calendar
equity_cal = self.trading_calendars[Equity]
def reindex_to_primary_calendar(a, field):
"""
            Reindex an array of prices computed on the equity (NYSE)
            calendar onto the window on the primary calendar with the
            same dt and window size.
"""
if mode == 'daily':
dts = cal.sessions_window(dt, -9)
# `dt` may not be a session on the equity calendar, so
# find the next valid session.
equity_sess = equity_cal.minute_to_session_label(dt)
equity_dts = equity_cal.sessions_window(equity_sess, -9)
elif mode == 'minute':
dts = cal.minutes_window(dt, -10)
equity_dts = equity_cal.minutes_window(dt, -10)
output = pd.Series(
index=equity_dts,
data=a,
).reindex(dts)
# Fill after reindexing, to ensure we don't forward fill
# with values that are being dropped.
if field == 'volume':
return output.fillna(0)
elif field == 'price':
return output.fillna(method='ffill')
else:
return output
fields = fields if fields is not None else ALL_FIELDS
assets = assets if assets is not None else [self.ASSET2, self.ASSET3]
bar_data = self.create_bardata(
simulation_dt_func=lambda: dt,
)
check_internal_consistency(
bar_data, assets, fields, 10, freq
)
for field in fields:
for asset in assets:
asset_series = bar_data.history(asset, field, 10, freq)
base = MINUTE_FIELD_INFO[field] + 2
if idx < 9:
missing_count = 9 - idx
present_count = 9 - missing_count
if field in OHLCP:
if asset == self.ASSET2:
# asset2 should have some leading nans
np.testing.assert_array_equal(
np.full(missing_count, np.nan),
asset_series[0:missing_count]
)
# asset2 should also have some real values
np.testing.assert_array_equal(
np.array(range(base,
base + present_count + 1)),
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 should be NaN the entire time
np.testing.assert_array_equal(
np.full(10, np.nan),
asset_series
)
elif field == 'volume':
if asset == self.ASSET2:
# asset2 should have some zeros (instead of nans)
np.testing.assert_array_equal(
np.zeros(missing_count),
asset_series[0:missing_count]
)
# and some real values
np.testing.assert_array_equal(
np.array(
range(base, base + present_count + 1)
) * 100,
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 is all zeros, no volume yet
np.testing.assert_array_equal(
np.zeros(10),
asset_series
)
else:
# asset3 should have data every 10 minutes
# construct an array full of nans, put something in the
# right slot, and test for comparison
position_from_end = ((idx + 1) % 10) + 1
# asset3's baseline data is 9 NaNs, then 11, then 9 NaNs,
# then 21, etc. for idx 9 to 19, value_for_asset3 should
# be a baseline of 11 (then adjusted for the individual
# field), thus the rounding down to the nearest 10.
value_for_asset3 = (((idx + 1) // 10) * 10) + \
MINUTE_FIELD_INFO[field] + 1
if field in OHLC:
asset3_answer_key = np.full(10, np.nan)
asset3_answer_key[-position_from_end] = \
value_for_asset3
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
),
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'volume':
asset3_answer_key = np.zeros(10)
asset3_answer_key[-position_from_end] = \
value_for_asset3 * 100
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
) * 100,
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'price':
# price is always forward filled
# asset2 has prices every minute, so it's easy
if asset == self.ASSET2:
# at idx 9, the data is 2 to 11
np.testing.assert_array_equal(
reindex_to_primary_calendar(
range(idx - 7, idx + 3),
field=field,
),
asset_series
)
if asset == self.ASSET3:
# Second part begins on the session after
# `position_from_end` on the NYSE calendar.
second_begin = (
dt - equity_cal.day * (position_from_end - 1)
)
# First part goes up until the start of the
# second part, because we forward-fill.
first_end = second_begin - cal.day
first_part = asset_series[:first_end]
second_part = asset_series[second_begin:]
decile_count = ((idx + 1) // 10)
# in our test data, asset3 prices will be nine
# NaNs, then ten 11s, ten 21s, ten 31s...
if len(second_part) >= 10:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
elif decile_count == 1:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
np.testing.assert_array_equal(
np.array([11] * len(second_part)),
second_part
)
else:
np.testing.assert_array_equal(
np.array([decile_count * 10 - 9] *
len(first_part)),
first_part
)
np.testing.assert_array_equal(
np.array([decile_count * 10 + 1] *
len(second_part)),
second_part
)
def check_internal_consistency(bar_data, assets, fields, bar_count, freq):
if isinstance(assets, Asset):
asset_list = [assets]
else:
asset_list = assets
if isinstance(fields, str):
field_list = [fields]
else:
field_list = fields
multi_field_dict = {
asset: bar_data.history(asset, field_list, bar_count, freq)
for asset in asset_list
}
multi_asset_dict = {
field: bar_data.history(asset_list, field, bar_count, freq)
for field in fields
}
panel = bar_data.history(asset_list, field_list, bar_count, freq)
for field in field_list:
# make sure all the different query forms are internally
# consistent
for asset in asset_list:
series = bar_data.history(asset, field, bar_count, freq)
np.testing.assert_array_equal(
series,
multi_asset_dict[field][asset]
)
np.testing.assert_array_equal(
series,
multi_field_dict[asset][field]
)
np.testing.assert_array_equal(
series,
panel[field][asset]
)
# each minute's OHLCV data has a consistent offset for each field.
# for example, the open is always 1 higher than the close, the high
# is always 2 higher than the close, etc.
MINUTE_FIELD_INFO = {
'open': 1,
'high': 2,
'low': -1,
'close': 0,
'price': 0,
'volume': 0, # unused, later we'll multiply by 100
}
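# Editorial illustration (not part of the original test suite): given the
# offsets above, a synthetic minute whose close is ``c`` has open = c + 1,
# high = c + 2, low = c - 1, price = c and volume = c * 100.
def _example_synthetic_minute_bar(close):
    """Sketch: reconstruct one synthetic OHLCV bar from its close value."""
    bar = {field: close + offset for field, offset in MINUTE_FIELD_INFO.items()}
    bar['volume'] = close * 100  # the fixtures scale volume by 100
    return bar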
class MinuteEquityHistoryTestCase(WithHistory,
zf.WithMakeAlgo,
zf.ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
DATA_PORTAL_FIRST_TRADING_DAY = zf.alias('TRADING_START_DT')
@classmethod
def make_equity_minute_bar_data(cls):
equities_cal = cls.trading_calendars[Equity]
data = {}
sids = {2, 5, cls.SHORT_ASSET_SID, cls.HALF_DAY_TEST_ASSET_SID}
for sid in sids:
asset = cls.asset_finder.retrieve_asset(sid)
data[sid] = create_minute_df_for_asset(
equities_cal,
asset.start_date,
asset.end_date,
start_val=2,
)
data[1] = create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2014-01-03', tz='utc'),
pd.Timestamp('2016-01-29', tz='utc'),
start_val=2,
)
asset2 = cls.asset_finder.retrieve_asset(2)
data[asset2.sid] = create_minute_df_for_asset(
equities_cal,
asset2.start_date,
equities_cal.previous_session_label(asset2.end_date),
start_val=2,
minute_blacklist=[
pd.Timestamp('2015-01-08 14:31', tz='UTC'),
pd.Timestamp('2015-01-08 21:00', tz='UTC'),
],
)
# Start values are crafted so that the thousands place are equal when
# adjustments are applied correctly.
# The splits and mergers are defined as 4:1 then 2:1 ratios, so the
# prices approximate that adjustment by quartering and then halving
# the thousands place.
data[cls.MERGER_ASSET_SID] = data[cls.SPLIT_ASSET_SID] = pd.concat((
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-05', tz='UTC'),
pd.Timestamp('2015-01-05', tz='UTC'),
start_val=8000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-06', tz='UTC'),
pd.Timestamp('2015-01-06', tz='UTC'),
start_val=2000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-07', tz='UTC'),
pd.Timestamp('2015-01-07', tz='UTC'),
start_val=1000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-08', tz='UTC'),
pd.Timestamp('2015-01-08', tz='UTC'),
start_val=1000)
))
asset3 = cls.asset_finder.retrieve_asset(3)
data[3] = create_minute_df_for_asset(
equities_cal,
asset3.start_date,
asset3.end_date,
start_val=2,
interval=10,
)
return data.items()
def test_history_in_initialize(self):
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
history([1], 10, '1d', 'price')
def handle_data(context, data):
pass
"""
)
algo = self.make_algo(script=algo_text)
with self.assertRaises(HistoryInInitialize):
algo.run()
def test_negative_bar_count(self):
"""
Negative bar counts leak future information.
"""
with self.assertRaisesRegex(
ValueError,
"bar_count must be >= 1, but got -1"
):
self.data_portal.get_history_window(
[self.ASSET1],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
-1,
'1d',
'close',
'minute',
)
def test_daily_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, 1/4 and 1/5
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.open_and_close_for_session(jan5)[1],
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([np.nan, 8389]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# Value from 1/5 should be quartered
np.testing.assert_array_equal(
[2097.25,
# Split occurs. The value of the thousands place should
# match.
2004],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[1048.625, 1194.50, 1004.0],
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted
np.testing.assert_array_equal([1389, 1009], window4)
def test_daily_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
asset = self.DIVIDEND_ASSET
# before any of the dividends
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.session_close(jan5),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([nan, 391]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[383.18, # 391 (last close) * 0.98 (first div)
# Dividend occurs prior.
396],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[367.853, # 391 (last close) * 0.98 * 0.96 (both)
749.76, # 781 (last_close) * 0.96 (second div)
786], # no adjustment
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
        # should not be adjusted; expect the 1/7 close and the partial 1/8 value
np.testing.assert_array_equal([1171, 1181], window4)
def test_minute_before_assets_trading(self):
# since asset2 and asset3 both started trading on 1/5/2015, let's do
# some history windows that are completely before that
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.previous_session_label(pd.Timestamp(
'2015-01-05', tz='UTC'
))
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
check_internal_consistency(
bar_data, [self.ASSET2, self.ASSET3], ALL_FIELDS, 10, '1m'
)
for field in ALL_FIELDS:
# OHLCP should be NaN
# Volume should be 0
asset2_series = bar_data.history(self.ASSET2, field, 10, '1m')
asset3_series = bar_data.history(self.ASSET3, field, 10, '1m')
if field == 'volume':
np.testing.assert_array_equal(np.zeros(10), asset2_series)
np.testing.assert_array_equal(np.zeros(10), asset3_series)
else:
np.testing.assert_array_equal(
np.full(10, np.nan),
asset2_series
)
np.testing.assert_array_equal(
np.full(10, np.nan),
asset3_series
)
@parameterized.expand([
('open_sid_2', 'open', 2),
('high_sid_2', 'high', 2),
('low_sid_2', 'low', 2),
('close_sid_2', 'close', 2),
('volume_sid_2', 'volume', 2),
('open_sid_3', 'open', 3),
('high_sid_3', 'high', 3),
('low_sid_3', 'low', 3),
('close_sid_3', 'close', 3),
('volume_sid_3', 'volume', 3),
])
def test_minute_regular(self, name, field, sid):
# asset2 and asset3 both started on 1/5/2015, but asset3 trades every
# 10 minutes
asset = self.asset_finder.retrieve_asset(sid)
# Check the first hour of equities trading.
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-05', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
self.verify_regular_dt(idx, minute, 'minute',
assets=[asset],
fields=[field])
def test_minute_sunday_midnight(self):
# Most trading calendars aren't open at midnight on Sunday.
sunday_midnight = pd.Timestamp('2015-01-09', tz='UTC')
# Find the closest prior minute when the trading calendar was
# open (note that if the calendar is open at `sunday_midnight`,
# this will be `sunday_midnight`).
trading_minutes = self.trading_calendar.all_minutes
last_minute = trading_minutes[trading_minutes <= sunday_midnight][-1]
sunday_midnight_bar_data = self.create_bardata(lambda: sunday_midnight)
last_minute_bar_data = self.create_bardata(lambda: last_minute)
# Ensure that we get the same results at midnight on Sunday as
# the last open minute.
with handle_non_market_minutes(sunday_midnight_bar_data):
for field in ALL_FIELDS:
np.testing.assert_array_equal(
sunday_midnight_bar_data.history(
self.ASSET2,
field,
30,
'1m',
),
last_minute_bar_data.history(self.ASSET2, field, 30, '1m')
)
def test_minute_after_asset_stopped(self):
# SHORT_ASSET's last day was 2015-01-06
# get some history windows that straddle the end
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-07', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute
)
check_internal_consistency(
bar_data, self.SHORT_ASSET, ALL_FIELDS, 30, '1m'
)
# Reset data portal because it has advanced past next test date.
data_portal = self.make_data_portal()
# close high low open price volume
# 2015-01-06 20:47:00+00:00 768 770 767 769 768 76800
# 2015-01-06 20:48:00+00:00 769 771 768 770 769 76900
# 2015-01-06 20:49:00+00:00 770 772 769 771 770 77000
# 2015-01-06 20:50:00+00:00 771 773 770 772 771 77100
# 2015-01-06 20:51:00+00:00 772 774 771 773 772 77200
# 2015-01-06 20:52:00+00:00 773 775 772 774 773 77300
# 2015-01-06 20:53:00+00:00 774 776 773 775 774 77400
# 2015-01-06 20:54:00+00:00 775 777 774 776 775 77500
# 2015-01-06 20:55:00+00:00 776 778 775 777 776 77600
# 2015-01-06 20:56:00+00:00 777 779 776 778 777 77700
# 2015-01-06 20:57:00+00:00 778 780 777 779 778 77800
# 2015-01-06 20:58:00+00:00 779 781 778 780 779 77900
# 2015-01-06 20:59:00+00:00 780 782 779 781 780 78000
# 2015-01-06 21:00:00+00:00 781 783 780 782 781 78100
# 2015-01-07 14:31:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:32:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:33:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:34:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:35:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:36:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:37:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:38:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:39:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:40:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:41:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:42:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:43:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:44:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:45:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:46:00+00:00 NaN NaN NaN NaN NaN 0
# choose a window that contains the last minute of the asset
window_start = pd.Timestamp('2015-01-06 20:47', tz='UTC')
window_end = pd.Timestamp('2015-01-07 14:46', tz='UTC')
bar_data = BarData(
data_portal=data_portal,
simulation_dt_func=lambda: minutes[15],
data_frequency='minute',
restrictions=NoRestrictions(),
trading_calendar=self.trading_calendar,
)
bar_count = len(
self.trading_calendar.minutes_in_range(window_start, window_end)
)
window = bar_data.history(
self.SHORT_ASSET,
ALL_FIELDS,
bar_count,
'1m',
)
# Window should start with 14 values and end with 16 NaNs/0s.
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(
range(76800, 78101, 100),
window['volume'][0:14]
)
np.testing.assert_array_equal(
np.zeros(16),
window['volume'][-16:]
)
else:
np.testing.assert_array_equal(
np.array(range(768, 782)) + MINUTE_FIELD_INFO[field],
window[field][0:14]
)
np.testing.assert_array_equal(
np.full(16, np.nan),
window[field][-16:]
)
# now do a smaller window that is entirely contained after the asset
# ends
window = bar_data.history(self.SHORT_ASSET, ALL_FIELDS, 5, '1m')
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(np.zeros(5), window['volume'])
else:
np.testing.assert_array_equal(np.full(5, np.nan),
window[field])
def test_minute_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
# the assets' close column starts at 2 on the first minute of
# 1/5, then goes up one per minute forever
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, last 10 minutes of jan 5
equity_cal = self.trading_calendars[Equity]
window1 = self.data_portal.get_history_window(
[asset],
equity_cal.open_and_close_for_session(jan5)[1],
10,
'1m',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
np.array(range(8380, 8390)), window1)
# straddling the first event - begins with the last 5 equity
# minutes on 2015-01-05, ends with the first 5 on
# 2015-01-06.
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(self.trading_calendar.minutes_in_range(
window2_start,
window2_end,
))
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
window2_count,
'1m',
'close',
'minute',
)[asset]
            # five minutes from 1/5 should be quartered (first split, ratio 0.25)
np.testing.assert_array_equal(
[2096.25,
2096.5,
2096.75,
2097,
2097.25],
window2[:5],
)
# Split occurs. The value of the thousands place should
# match.
np.testing.assert_array_equal(
[2000,
2001,
2002,
2003,
2004],
window2[-5:],
)
# straddling both events! on the equities calendar this is 5
# minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5.
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = pd.Timestamp('2015-01-07 14:35', tz='UTC')
window3_minutes = self.trading_calendar.minutes_in_range(
window3_start,
window3_end,
)
window3_count = len(window3_minutes)
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
window3_count,
'1m',
'close',
'minute',
)[asset]
            # first five minutes should be 8385-8389, but divided by eight
np.testing.assert_array_equal(
[1048.125, 1048.25, 1048.375, 1048.5, 1048.625],
window3[0:5]
)
# next 390 minutes (the 2015-01-06 session) should be
# 2000-2390, but halved
middle_day_open_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 14:31', tz='UTC')
)
middle_day_close_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 21:00', tz='UTC')
)
np.testing.assert_array_equal(
np.array(range(2000, 2390), dtype='float64') / 2,
window3[middle_day_open_i:middle_day_close_i + 1]
)
# final 5 minutes should be 1000-1004
np.testing.assert_array_equal(range(1000, 1005), window3[-5:])
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:40', tz='UTC'),
5,
'1m',
'close',
'minute',
)[asset]
# should not be adjusted, should be 1005 to 1009
np.testing.assert_array_equal(range(1005, 1010), window4)
def test_minute_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
# before any of the dividends
window1 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
pd.Timestamp('2015-01-05 21:00', tz='UTC'),
10,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
np.testing.assert_array_equal(np.array(range(382, 392)), window1)
# straddling the first dividend (10 active equity minutes)
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(
self.trading_calendar.minutes_in_range(window2_start, window2_end)
)
window2 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
window2_end,
window2_count,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
# first dividend is 2%, so the first five values should be 2% lower
# than before
np.testing.assert_array_almost_equal(
np.array(range(387, 392), dtype='float64') * 0.98,
window2[0:5]
)
# second half of window is unadjusted
np.testing.assert_array_equal(range(392, 397), window2[-5:])
# straddling both dividends (on the equities calendar, this is
# 5 minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5).
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = pd.Timestamp('2015-01-07 14:35', tz='UTC')
window3_minutes = self.trading_calendar.minutes_in_range(
window3_start,
window3_end,
)
window3_count = len(window3_minutes)
window3 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
window3_end,
window3_count,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
# first five minute from 1/7 should be hit by 0.9408 (= 0.98 * 0.96)
np.testing.assert_array_almost_equal(
np.around(np.array(range(387, 392), dtype='float64') * 0.9408, 3),
window3[0:5]
)
# next 390 minutes (the 2015-01-06 session) should be hit by 0.96
# (second dividend)
middle_day_open_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 14:31', tz='UTC')
)
middle_day_close_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 21:00', tz='UTC')
)
np.testing.assert_array_almost_equal(
np.array(range(392, 782), dtype='float64') * 0.96,
window3[middle_day_open_i:middle_day_close_i + 1]
)
# last 5 minutes should not be adjusted
np.testing.assert_array_equal(np.array(range(782, 787)), window3[-5:])
def test_passing_iterable_to_history_regular_hours(self):
# regular hours
current_dt = pd.Timestamp("2015-01-06 9:45", tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
bar_data.history(pd.Index([self.ASSET1, self.ASSET2]),
"high", 5, "1m")
def test_passing_iterable_to_history_bts(self):
# before market hours
current_dt = | pd.Timestamp("2015-01-07 8:45", tz='US/Eastern') | pandas.Timestamp |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading and preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import zipfile
from PIL import Image
import numpy as np
import pandas as pd
from six.moves import urllib
from sklearn import preprocessing
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras import backend
from tensorflow.compat.v1.keras import datasets
from sklearn.model_selection import train_test_split
import dvrl_utils
def load_tabular_data(data_name, dict_no, noise_rate):
"""Loads Adult Income and Blog Feedback datasets.
This module loads the two tabular datasets and saves train.csv, valid.csv and
test.csv files under data_files directory.
UCI Adult data link: https://archive.ics.uci.edu/ml/datasets/Adult
UCI Blog data link: https://archive.ics.uci.edu/ml/datasets/BlogFeedback
If noise_rate > 0.0, adds noise on the datasets.
Then, saves train.csv, valid.csv, test.csv on './data_files/' directory
Args:
    data_name: 'adult', 'blog', 'cali', 'fish', or 'covid'
dict_no: training and validation set numbers
noise_rate: label corruption ratio
Returns:
noise_idx: indices of noisy samples
"""
# Loads datasets from links
uci_base_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
# Adult Income dataset
if data_name == 'adult':
train_url = uci_base_url + 'adult/adult.data'
test_url = uci_base_url + 'adult/adult.test'
data_train = pd.read_csv(train_url, header=None)
data_test = pd.read_csv(test_url, skiprows=1, header=None)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['Age', 'WorkClass', 'fnlwgt', 'Education', 'EducationNum',
'MaritalStatus', 'Occupation', 'Relationship', 'Race',
'Gender', 'CapitalGain', 'CapitalLoss', 'HoursPerWeek',
'NativeCountry', 'Income']
# Creates binary labels
df['Income'] = df['Income'].map({' <=50K': 0, ' >50K': 1,
' <=50K.': 0, ' >50K.': 1})
# Changes string to float
df.Age = df.Age.astype(float)
df.fnlwgt = df.fnlwgt.astype(float)
    df.EducationNum = df.EducationNum.astype(float)
df.CapitalGain = df.CapitalGain.astype(float)
df.CapitalLoss = df.CapitalLoss.astype(float)
# One-hot encoding
df = pd.get_dummies(df, columns=['WorkClass', 'Education', 'MaritalStatus',
'Occupation', 'Relationship',
'Race', 'Gender', 'NativeCountry'])
# Sets label name as Y
df = df.rename(columns={'Income': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Blog Feedback dataset
elif data_name == 'blog':
resp = urllib.request.urlopen(uci_base_url + '00304/BlogFeedback.zip')
zip_file = zipfile.ZipFile(io.BytesIO(resp.read()))
# Loads train dataset
train_file_name = 'blogData_train.csv'
data_train = pd.read_csv(zip_file.open(train_file_name), header=None)
# Loads test dataset
data_test = []
for i in range(29):
if i < 9:
file_name = 'blogData_test-2012.02.0'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.02.'+ str(i+1) + '.00_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
if i == 0:
data_test = temp_data
else:
data_test = pd.concat((data_test, temp_data), axis=0)
for i in range(31):
if i < 9:
file_name = 'blogData_test-2012.03.0'+ str(i+1) + '.00_00.csv'
elif i < 25:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.01_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
data_test = pd.concat((data_test, temp_data), axis=0)
df = pd.concat((data_train, data_test), axis=0)
# Removes rows with missing data
df = df.dropna()
# Sets label and named as Y
df.columns = df.columns.astype(str)
df['280'] = 1*(df['280'] > 0)
df = df.rename(columns={'280': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# load california housing dataset (./data_files/california_housing_train.csv
# and ./data_files/california_housing_test.csv)
elif data_name == 'cali':
train_url = './data_files/california_housing_train.csv'
test_url = './data_files/california_housing_test.csv'
data_train = pd.read_csv(train_url, header=0)
data_test = pd.read_csv(test_url, header=0)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms',
'population', 'households', 'median_income', 'median_house_value']
df['longitude'] = pd.to_numeric(df['longitude'], downcast="float")
df['latitude'] = pd.to_numeric(df['latitude'], downcast="float")
df['housing_median_age'] = pd.to_numeric(df['housing_median_age'], downcast="float")
df['total_rooms'] = pd.to_numeric(df['total_rooms'], downcast="float")
df['total_bedrooms'] = pd.to_numeric(df['total_bedrooms'], downcast="float")
df['population'] = pd.to_numeric(df['population'], downcast="float")
df['households'] = pd.to_numeric(df['households'], downcast="float")
df['median_income'] = pd.to_numeric(df['median_income'], downcast="float")
df['median_house_value'] = pd.to_numeric(df['median_house_value'], downcast="float")
    # Binarizes the label: 1 if median_house_value > 200000, else 0
    df['median_house_value'].where(df['median_house_value'] > 200000, 0, inplace=True)
    df['median_house_value'].where(df['median_house_value'] <= 200000, 1, inplace=True)
# Sets label name as Y
df = df.rename(columns={'median_house_value': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Extension: load fish dataset
elif data_name == 'fish':
train_url = './data_files/fish.csv'
df = pd.read_csv(train_url, header=0)
df.columns = ['species', 'length', 'weight']
df = df[(df[['length','weight']] != 0).all(axis=1)]
data_train, data_test = train_test_split(df, test_size=0.2)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['species', 'length', 'weight']
df['length'] = pd.to_numeric(df['length'], downcast="float")
df['weight'] = pd.to_numeric(df['weight'], downcast="float")
# One-hot encoding
df = pd.get_dummies(df, columns=['species'])
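# Binarizes the label: weight becomes 1 if it exceeds 31, else 0 (same where() pattern as above).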
df['weight'].where(df['weight'] > 31, 0, inplace=True)
df['weight'].where(df['weight'] <= 31, 1, inplace=True)
# Sets label name as Y
df = df.rename(columns={'weight': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
elif data_name == 'covid':
train_url = './data_files/covid_train.csv'
df = pd.read_csv(train_url, header=0)
df = df[df.Target != 'Fatalities']
df.drop(columns=['Id', 'County', 'Province_State', 'Date', 'Target'], inplace=True)
data_train, data_test = train_test_split(df, test_size=0.2)
df = pd.concat((data_train, data_test), axis=0)
# Converts numeric columns to float
df['Population'] = pd.to_numeric(df['Population'], downcast="float")
df['Weight'] = pd.to_numeric(df['Weight'], downcast="float")
df['TargetValue'] = pd.to_numeric(df['TargetValue'], downcast="float")
# One-hot encoding
df = pd.get_dummies(df, columns=['Country_Region'])
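# Binarizes the label: TargetValue becomes 1 if it exceeds 13, else 0.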
df['TargetValue'].where(df['TargetValue'] > 13, 0, inplace=True)
df['TargetValue'].where(df['TargetValue'] <= 13, 1, inplace=True)
# Sets label name as Y
df = df.rename(columns={'TargetValue': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Splits train, valid and test sets
train_idx = range(len(data_train))
train = df.loc[train_idx]
test_idx = range(len(data_train), len(df))
test = df.loc[test_idx]
train_idx_final = np.random.permutation(len(train))[:dict_no['train']]
temp_idx = np.random.permutation(len(test))
valid_idx_final = temp_idx[:dict_no['valid']] + len(data_train)
test_idx_final = temp_idx[dict_no['valid']:] + len(data_train)
train = train.loc[train_idx_final]
valid = test.loc[valid_idx_final]
test = test.loc[test_idx_final]
# Adds noise on labels
y_train = np.asarray(train['Y'])
y_train, noise_idx = dvrl_utils.corrupt_label(y_train, noise_rate)
train['Y'] = y_train
# Saves data
if not os.path.exists('data_files'):
os.makedirs('data_files')
train.to_csv('./data_files/train.csv', index=False)
valid.to_csv('./data_files/valid.csv', index=False)
test.to_csv('./data_files/test.csv', index=False)
# Returns indices of noisy samples
return noise_idx
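# Illustrative follow-up (hypothetical, not part of the original snippet): the
# CSVs written above are meant to be consumed by preprocess_data() defined
# further down in this file, e.g.
#   x_train, y_train, x_valid, y_valid, x_test, y_test, col_names = \
#       preprocess_data('minmax', 'train.csv', 'valid.csv', 'test.csv')
# 'minmax' is one of the two documented normalization options; 'standard' is the other.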
def load_rossmann_data(dict_no, setting, test_store_type):
"""Loads Rossmann data.
This module loads Rossmann data for a domain adaptation application.
Rossmann data link: https://www.kaggle.com/c/rossmann-store-sales
Users should download 'rossmann-store-sales.zip' from the link above and
save it in the './data_files/' directory.
Args:
dict_no: the number of source and valid samples
setting: 'train-on-all', 'train-on-rest', or 'train-on-specific'
test_store_type: 'A', 'B', 'C', or 'D'
"""
# Loads datasets
zip_file = zipfile.ZipFile('./data_files/rossmann-store-sales.zip')
train_data = pd.read_csv(zip_file.open('train.csv'))
store_data = pd.read_csv(zip_file.open('store.csv'))
# Extracts features
train_data = train_data[['Store', 'Sales', 'DayOfWeek', 'Customers', 'Open',
'Promo', 'StateHoliday', 'SchoolHoliday']]
store_data = store_data[['Store', 'StoreType', 'Assortment',
'CompetitionDistance', 'CompetitionOpenSinceMonth',
'Promo2', 'Promo2SinceWeek']]
# Data preprocessing
# Fill na to 0
store_data = store_data.fillna(0)
# Converts string to int
train_data['StateHoliday'] = train_data['StateHoliday'].replace(['a', 'b',
'c'], 1)
# One-hot encoding
store_data = pd.get_dummies(store_data)
# Combines store data and train data
data_x = pd.merge(train_data, store_data, on='Store')
# Removes the samples when close
remove_idx = data_x.index[data_x['Sales'] == 0].tolist()
data_x = data_x.drop(remove_idx, axis=0)
# Renames target variable to 'Y'
data_x = data_x.rename(columns={'Sales': 'Y'})
# Defines store types
data_c = data_x[['StoreType_a', 'StoreType_b', 'StoreType_c', 'StoreType_d']]
data_c = data_c.rename(columns={'StoreType_a': 'A', 'StoreType_b': 'B',
'StoreType_c': 'C', 'StoreType_d': 'D'})
# Defines features
data_x = data_x.drop(['StoreType_a', 'StoreType_b',
'StoreType_c', 'StoreType_d'], axis=1)
# Resets index
data_x = data_x.reset_index()
data_c = data_c.reset_index()
data_x = data_x.drop(['index'], axis=1)
data_c = data_c.drop(['index'], axis=1)
# Splits source, valid, and target sets
# Random partitioning
idx = np.random.permutation(len(data_x))
source_idx = idx[:dict_no['source']]
valid_idx = idx[dict_no['source']:(dict_no['source']+dict_no['valid'])]
target_idx = idx[(dict_no['source']+dict_no['valid']):]
x_source = data_x.loc[source_idx]
c_source = data_c.loc[source_idx]
x_valid = data_x.loc[valid_idx]
c_valid = data_c.loc[valid_idx]
x_target = data_x.loc[target_idx]
c_target = data_c.loc[target_idx]
# Selects source dataset based on the setting and test_store_type
if setting == 'train-on-all':
source_sub_idx = c_source.index[c_source[test_store_type] >= 0].tolist()
elif setting == 'train-on-rest':
source_sub_idx = c_source.index[c_source[test_store_type] == 0].tolist()
elif setting == 'train-on-specific':
source_sub_idx = c_source.index[c_source[test_store_type] == 1].tolist()
# Selects valid and target datasets based on test_store_type
valid_sub_idx = c_valid.index[c_valid[test_store_type] == 1].tolist()
target_sub_idx = c_target.index[c_target[test_store_type] == 1].tolist()
# Divides source, valid, and target datasets
source = x_source.loc[source_sub_idx]
valid = x_valid.loc[valid_sub_idx]
target = x_target.loc[target_sub_idx]
source.to_csv('./data_files/source.csv', index=False)
valid.to_csv('./data_files/valid.csv', index=False)
target.to_csv('./data_files/target.csv', index=False)
return
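# Example call (hypothetical; the sample counts below are placeholders, not
# values taken from this repo) for the domain-adaptation setting described in
# the docstring:
#   load_rossmann_data({'source': 600000, 'valid': 10000},
#                      setting='train-on-rest', test_store_type='B')
# The resulting source.csv / valid.csv / target.csv can then be normalized with
# preprocess_data('minmax', 'source.csv', 'valid.csv', 'target.csv').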
def preprocess_data(normalization,
train_file_name, valid_file_name, test_file_name):
"""Loads datasets, divides features and labels, and normalizes features.
Args:
normalization: 'minmax' or 'standard'
train_file_name: file name of training set
valid_file_name: file name of validation set
test_file_name: file name of testing set
Returns:
x_train: training features
y_train: training labels
x_valid: validation features
y_valid: validation labels
x_test: testing features
y_test: testing labels
col_names: column names
"""
# Loads datasets
train = pd.read_csv('./data_files/'+train_file_name)
valid = pd.read_csv('./data_files/'+valid_file_name)
test = pd.read_csv('./data_files/'+test_file_name)
# Extracts label
y_train = np.asarray(train['Y'])
y_valid = np.asarray(valid['Y'])
y_test = np.asarray(test['Y'])
# Drops label
train = train.drop(columns=['Y'])
valid = valid.drop(columns=['Y'])
test = test.drop(columns=['Y'])
# Column names
col_names = train.columns.values.astype(str)
# Concatenates train, valid, test for normalization
df = pd.concat((train, valid, test), axis=0)
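# A minimal sketch (hypothetical, assuming scikit-learn scalers; not the
# original implementation) of the normalization and re-splitting step that the
# docstring above describes:
#   from sklearn.preprocessing import MinMaxScaler, StandardScaler
#   scaler = MinMaxScaler() if normalization == 'minmax' else StandardScaler()
#   df = scaler.fit_transform(df)
#   x_train = df[:len(y_train), :]
#   x_valid = df[len(y_train):len(y_train) + len(y_valid), :]
#   x_test = df[len(y_train) + len(y_valid):, :]
#   return x_train, y_train, x_valid, y_valid, x_test, y_test, col_names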
# x-by-y.py - dataviz module for quick X by Y charts.
__version__ = '0.1'
__all__ = ['layout', 'callback']
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
from lib.components import x_axis_dropdown, y_axis_dropdown, DATASET, DATA_DIR
import assets.footer as footer
df = pd.read_csv(DATASET)
layout = html.Div([
html.Div([
html.Div(x_axis_dropdown,
id="left-selectors",
style={'width':'50%', 'float':'left', 'padding':'1vw', 'text-align':'center',
'margin':'auto', 'backgroundColor': '#'},
),
html.Div(y_axis_dropdown,
id="right-selectors",
style={'width':'50%', 'padding':'1vw', 'float':'left', 'text-align':"center"},
),
],
id="selector-cols",
style={'text-align':"center",
},
),
html.Div([
dcc.Graph(id='table_',
config={'displayModeBar': False},
style={'text-align': 'center','margin-left':0, "width":"auto", 'float':'left', 'display':'table'},
),
],
style={'backgroundColor':"#0f0", 'clear':"both"
}),
],
style={'backgroundColor':"#",
},
)
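# Example registration (hypothetical) in a multi-page Dash app, assuming this
# module has been imported as x_by_y (the hyphenated filename x-by-y.py would
# need importlib or a rename for a plain import):
#   import dash
#   app = dash.Dash(__name__)
#   app.layout = x_by_y.layout
#   x_by_y.callback(app)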
def callback(app):
@app.callback(
Output('x-axis-feature-dropdown', 'options'),
[Input('x-axis-dropdown', 'value')])
def update_X_dropdown(dataset):
print("old 1 fired")
df_1 = pd.read_csv(DATA_DIR + '/' + dataset)
feature_options = {col:col for col in df_1.columns}
return [{'label': i, 'value': i} for i in feature_options]
@app.callback(
Output('y-axis-feature-dropdown', 'options'),
[Input('y-axis-dropdown', 'value')])
def update_Y_dropdown(dataset):
df_2 = pd.read_csv(DATA_DIR + '/' + dataset)
feature_options = {col:col for col in df_2.columns}
return [{'label': i, 'value': i} for i in feature_options]
@app.callback(
Output('table_', 'figure'),
[Input('x-axis-dropdown', 'value'),
Input('y-axis-dropdown', 'value'),
Input('x-axis-feature-dropdown', 'value'),
Input('y-axis-feature-dropdown', 'value'),
])
def update_figure(xdata, ydata, featx, featy):
fig=go.Figure()
if featx is not None:
df1 = pd.read_csv(DATA_DIR + '/' + xdata)
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
### Function to Create Colormap
def custom_div_cmap(numcolors=256, name='custom_div_cmap',colors=['black','dimgrey','lightgrey','white','palegreen','forestgreen', 'darkgreen']):
""" Create a custom colormap
Colors can be specified in any way understandable by matplotlib.colors.ColorConverter.to_rgb()
-> https://matplotlib.org/3.1.0/gallery/color/named_colors.html
"""
cmap = LinearSegmentedColormap.from_list(name=name, colors=colors, N=numcolors)
return cmap
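# Example usage (illustrative): cmap = custom_div_cmap(256)
# The returned colormap can be passed anywhere matplotlib accepts a cmap argument.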
fig, axs = plt.subplots(figsize=(15,15), sharex=True)
plt.axis('off')
files = sorted(glob('day*.json'))
for idx, file in enumerate(files):
year = int(file[-9:-5])
#year=2011
#read data
data = pd.read_json(file)
#create list of dates available (important for current year)
min_day = data.values[1,:][11,][0][0]
print(min_day)
max_day = data.values[1,:][11,][-1][0]
date = pd.period_range(pd.Timestamp(day=int(min_day[0:2]),month=int(min_day[3:5]),year=year), pd.Timestamp(day=int(max_day[0:2]),month=int(max_day[3:5]),year=year))
#create pandas Data Frames of needed data from conventional sources and solar/wind
wasser_data = pd.DataFrame(data.values[1,:][11,])[1]
biomasse_data = pd.DataFrame(data.values[2,:][11,])[1]
kernenergie_data = pd.DataFrame(data.values[3,:][11,])[1]
braunkohle_data = pd.DataFrame(data.values[4,:][11,])[1]
steinkohle_data = pd.DataFrame(data.values[5,:][11,])[1]
oel_data = pd.DataFrame(data.values[6,:][11,])[1]
gas_data = pd.DataFrame(data.values[7,:][11,])[1]
andere_data = pd.DataFrame(data.values[8,:][11,])[1]
wind_data = pd.DataFrame(data.values[9,:][11,])
import numpy as np
import pandas as pd
import math
# step 1/2: data generator
Batch_size = 20
Lens = 528  # 640 is taken as the cut-off point between training and validation.
TEST_MANIFEST_DIR = "../data/test_data.csv"
def ts_gen(path = TEST_MANIFEST_DIR, batch_size = Batch_size):
data_list = pd.read_csv(path)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02-spec-gen.ipynb (unless otherwise specified).
__all__ = ['init_spec', 'load_endpoints_df', 'get_endpoint_single_attr', 'init_stream_dict',
'add_params_to_stream_dict', 'add_streams_to_spec', 'construct_spec', 'save_spec', 'load_API_yaml']
# Cell
import numpy as np
import pandas as pd
import os
import yaml
from jinja2 import Template
# Cell
def init_spec(
title='BMRS API',
description='API for the Elexon Balancing Mechanism Reporting Service',
root_url='https://api.bmreports.com'
):
API_spec = dict()
API_spec['title'] = title
API_spec['description'] = description
API_spec['root_url'] = root_url
return API_spec
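# Example (illustrative): API_spec = init_spec()
# returns the base metadata dict that the other helpers listed in __all__
# (add_streams_to_spec, construct_spec, save_spec) presumably build on.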
# Cell
def load_endpoints_df(endpoints_fp: str='data/endpoints.csv'):
df_endpoints = pd.read_csv(endpoints_fp)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: | pd.Timestamp("2013-04-12 00:00:00") | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
# In[3]:
sub_1_p = pd.read_csv('./output/submission_1020.csv')
sub_2_p = pd.read_csv('./output/submission_1021.csv')
sub_3_p = pd.read_csv('./output/submission_12345.csv')
sub_4_p = pd.read_csv('./output/submission_1234.csv')
sub_5_p = pd.read_csv('./output/submission_2017.csv')
sub_6_p = pd.read_csv('./output/submission_4242.csv')
sub_7_p = pd.read_csv('./output/submission_77777.csv')
sub_8_p = pd.read_csv('./output/submission_8895.csv')
# In[4]:
total_type_p = pd.DataFrame()
total_type_p['sub_1020'] = sub_1_p['scalar_coupling_constant']
total_type_p['sub_1021'] = sub_2_p['scalar_coupling_constant']
total_type_p['sub_12345'] = sub_3_p['scalar_coupling_constant']
total_type_p['sub_1234'] = sub_4_p['scalar_coupling_constant']
total_type_p['sub_2017'] = sub_5_p['scalar_coupling_constant']
total_type_p['sub_4242'] = sub_6_p['scalar_coupling_constant']
total_type_p['sub_77777'] = sub_7_p['scalar_coupling_constant']
total_type_p['sub_8895'] = sub_8_p['scalar_coupling_constant']
# In[5]:
total_type_p['type'] = | pd.read_csv('type.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 12:30:30 2021
@author: sahand
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
dir_root = '/home/sahand/GoogleDrive/Data/Corpus/Dimensions AI unlimited citations/clean/' # ryzen
# dir_root = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/Dimensions/' # c1314
# =============================================================================
# Label prep - cleanup of data without label
# =============================================================================
categories = pd.read_csv(dir_root+'corpus category_for',names=['cat'])
data = pd.read_csv(dir_root+'publication idx',names=['id'])
data['cat'] = categories['cat']
# data['cat'] = data.cat.str.replace('[','').str.replace(']','').str[1:-1].str.split('}, {')
# data['cat1'] = data['cat'][0]
pub_ids = pd.DataFrame(data['id'])
data = data[pd.notnull(data['cat'])]
categories = categories[pd.notnull(categories['cat'])]
categories.to_csv(dir_root+'corpus category_for',index=False,header=False)
pub_ids_mask = pd.DataFrame(data['id'])
pub_ids_mask.to_csv(dir_root+'publication idx',index=False,header=False)
pub_ids_mask = data['id'].values.tolist()
# filtering operation:
f_name = 'abstract_title pure US'
corpus = pd.read_csv(dir_root+''+f_name,names=['data'])
corpus['id'] = pub_ids['id']
corpus = corpus[corpus['id'].isin(pub_ids_mask)]
corpus = corpus.drop('id',axis=1)
corpus = corpus[pd.notna(corpus['data'])]
corpus.to_csv(dir_root+''+f_name+' with id',index=False,header=False)
# =============================================================================
# Label prep - separate labels and clean
# =============================================================================
categories = pd.read_csv(dir_root+'corpus category_for',names=['cat'])
data = | pd.read_csv(dir_root+'publication idx',names=['id']) | pandas.read_csv |
from django.db import models
from django.utils import timezone
from django.db.models import Q
import asyncio
from ib_insync import IB, Stock, MarketOrder, util
from core.common import empty_append
from core.indicators import rel_dif
import vectorbtpro as vbt
import sys
import math
import pandas as pd
import numpy as np
from trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,
IB_LOCALHOST, IB_PORT)
### Interactive Brokers and data retrieval ###
'''
Contains:
- Communication with Interactive Brokers
- Retrieval of live data (Interactive brokers or YFinance)
- Performing order
- Models for financial products, stock exchanges...
Note: for some reason, it does not work if the MyIB class is not defined in models
'''
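# Illustrative sketch (not part of the original module): a hypothetical call to
# retrieve_data() showing the expected argument shapes -- a list of symbols from a
# single stock exchange plus a period string such as "10d", "3mo" or "1y". The
# symbol names and the returned value's structure are assumptions for illustration.
#
#   prices = retrieve_data(["AAPL", "MSFT"], "1y", interval="1 day")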
## All symbols must be from the same stock exchange
def retrieve_data(symbols,period,**kwargs):
try:
IBok=True
for symbol in symbols:
if kwargs.get("index",False):
action=Index.objects.get(symbol=symbol)
else:
action=Action.objects.get(symbol=symbol)
if action.stock_ex.ib_ticker in ["BVME.ETF"]:
IBok=False
break
index_symbol=exchange_to_symbol(action)
if (USE_IB_FOR_DATA and IBok) or kwargs.get("useIB",False):
fig= ''.join(x for x in period if x.isdigit())
if period.find("d")!=-1:
period_ib=fig +" D"
elif period.find("mo")!=-1:
period_ib=fig +" M"
elif period.find("y")!=-1:
period_ib=fig +" Y"
#Time period of one bar. Must be one of: ‘1 secs’, ‘5 secs’, ‘10 secs’ 15 secs’, ‘30 secs’, ‘1 min’, ‘2 mins’, ‘3 mins’, ‘5 mins’, ‘10 mins’, ‘15 mins’, ‘20 mins’, ‘30 mins’, ‘1 hour’, ‘2 hours’, ‘3 hours’, ‘4 hours’, ‘8 hours’, ‘1 day’, ‘1 week’, ‘1 month’.
if kwargs.get("interval",False):
fig= ''.join(x for x in kwargs.get("interval") if x.isdigit())
if period.find("m")!=-1:
interval=fig +" mins"
elif period.find("h")!=-1:
interval=fig +" hours"
elif period.find("d")!=-1:
interval=fig +" day"
else:
interval='1 day'
open_=[]
close=[]
low=[]
high=[]
volume=[]
myIB=MyIB()
for symbol in symbols:
action=Action.objects.get(symbol=symbol)
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
bars = myIB.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period_ib, #"10 D","1 M"
barSizeSetting=interval, #"1 day", "1 min"
whatToShow='TRADES',
useRTH=True,
formatDate=1)
df=util.df(bars)
open_=empty_append(open_,df["open"].values,axis=1)
close=empty_append(close,df["close"].values,axis=1)
high=empty_append(high,df["high"].values,axis=1)
low=empty_append(low,df["low"].values,axis=1)
volume=empty_append(volume,df["volume"].values,axis=1)
cours_open=pd.DataFrame(data=open_,index=df["date"],columns=symbols)
cours_close=pd.DataFrame(data=close,index=df["date"],columns=symbols)
cours_low=pd.DataFrame(data=low,index=df["date"],columns=symbols)
cours_high=pd.DataFrame(data=high,index=df["date"],columns=symbols)
cours_volume= | pd.DataFrame(data=volume,index=df["date"],columns=symbols) | pandas.DataFrame |
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from datetime import date
import dash_loading_spinners as dls
from dash.dependencies import Input, Output, ClientsideFunction, State
from app import app
import requests
features = ["Screw Speed", "Steam Flow Rate", "Steam Pressure", "Oven-Home Temperature",
"Water Temperature", "Oxygen_pct", "Oven-Home Pressure", "Combustion Air Pressure",
"Temperature before prear", "Temperature after prear", "Burner Position", "Burner_pct",
"Borra Flow Rate_kgh", "Cisco Flow Rate_kgh"]
cardtab_11 = dbc.Card([
dls.Hash(
dcc.Graph(id="graph-fuel", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
cardtab_21 = dbc.Card([
dls.Hash(
dcc.Graph(id="graph-distribution2", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
card_31 = dbc.Card(
[
dbc.Col([
dbc.Col([
html.P(
"Select date range that you want to see:"
),
dcc.DatePickerRange(
id='my-date-picker-range2',
min_date_allowed=date(2020, 10, 1),
max_date_allowed=date(2021, 6, 30),
initial_visible_month=date(2020, 10, 1),
end_date=date(2021, 6, 30),
clearable=True,
month_format="MMMM, YYYY",
with_portal=True,
number_of_months_shown=3
)
]),
html.Hr(),
dbc.Col([
html.P(
"Select the data frequency:"
),
dbc.RadioItems(
id='frequency-radioitems2',
labelStyle={"display": "inline-block"},
options= [
{"label": "Daily", "value": "data_daily"},
{"label": "Hourly", "value": "data_hourly"}
], value= "data_daily",
style= {"color": "black"}
)
])
])
])
card_41 = dbc.Card([
dbc.Col([
dbc.FormGroup([
dbc.Label("Y - Axis"),
dcc.Dropdown(
id="y-variable2",
options=[{
"label": col,
"value": col
} for col in features],
value="Steam Flow Rate",
),
]),
html.H6("Efficiency Range"),
dcc.RangeSlider(
id='slider-efficiency2',
min=0,
max=1.00,
step=0.01,
value=[0, 1.00]
),
html.P(id='range-efficiency2')
])
])
card_51 = dbc.Card([
dls.Hash(
dcc.Graph(id="graph-comparison2", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
layout= [
html.Div([
# html.Img(
# src = "/assets/images/C1_icon_1.png",
# className = "corr-icon"
# ),
html.Img(
src = "/assets/images/Buencafe-logo.png",
className = "corr-icon"
),
html.H2(
"Fuel Analytics",
className = "content-title"
),
html.Div(children=[
html.Div([
dbc.Tabs([
dbc.Tab(cardtab_11, label="Time series"),
dbc.Tab(cardtab_21, label="Distribution"),
],
id="card-tabs2",
card=True,
active_tab="tab-1",
),
card_31,
], className = "graph_col_1"),
html.Div(children =[
card_41,
card_51
], className = "data_col_2")
], className = "wrapper__steam-data wrapper__fuel-data")
],className = "wrapper__steam wrapper__fuel"),
]
# layout= [
# html.Div([
# html.Img(
# src = "/assets/images/C1_icon_1.png",
# className = "corr-icon"
# ),
# html.H2(
# "Fuel Analytics",
# className = "content-title"
# ),
# html.Div([
# dbc.Row([
# dbc.Col(
# dbc.Tabs([
# dbc.Tab(cardtab_11, label="Time series"),
# dbc.Tab(cardtab_21, label="Distribution"),
# ],
# id="card-tabs2",
# card=True,
# active_tab="tab-1",
# ),
# width=9
# ),
# dbc.Col(
# card_31, width=3
# )
# ]),
# dbc.Row([
# dbc.Col(
# card_41, width=3
# ),
# dbc.Col(
# card_51, width=9
# )
# ]),
# ])
# ],
# className = "corr-icon-container"
# )
# ]
@app.callback(
Output('graph-fuel','figure'),
[Input('my-date-picker-range2', 'start_date'),
Input('my-date-picker-range2', 'end_date'),
Input('frequency-radioitems2', 'value')]
)
def update_figure(start_date, end_date, value_radio):
try:
if value_radio == "data_daily":
query = "SELECT * FROM daily"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
data2 = pd.DataFrame(test_var)
data2['Time'] = pd.to_datetime(data2['Time']).dt.date.astype("datetime64[ns]")
# print("Llegada ", data2['Time'].value_counts())
data2.set_index(["Time"], inplace=True)
elif value_radio == "data_hourly":
query = "SELECT * FROM hourly"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
data2 = pd.DataFrame(test_var)
data2['Time'] = | pd.to_datetime(data2['Time']) | pandas.to_datetime |
import pandas as pd
from sqlalchemy import create_engine
from nyc_ccci_etl.commons.configuration import get_database_connection_parameters
class DataPreparator:
def __init__(self):
host, database, user, password = get_database_connection_parameters()
engine_string = "postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}".format(
user = user,
password = password,
host = host,
port = 5432,
database = database,
)
self.engine = create_engine(engine_string)
def split_train_test(self):
tabla_3 = pd.read_sql_table('centers', self.engine, schema="transformed")
tabla_4 = | pd.read_sql_table('inspections', self.engine, schema="transformed") | pandas.read_sql_table |
"""Text Prediction Model based on Pretrained Language Model. Version 1"""
from typing import Optional
import collections
import logging
import pandas as pd
import os
import random
import numpy as np
from ..abstract.abstract_model import AbstractModel
from ...features.feature_metadata import R_OBJECT, R_INT, R_FLOAT, R_CATEGORY, \
S_TEXT_NGRAM, S_TEXT_AS_CATEGORY, S_TEXT_SPECIAL
logger = logging.getLogger(__name__)
AG_TEXT_IMPORT_ERROR = 'autogluon.text has not been installed. ' \
'You may try to install "autogluon.text" first by running: ' \
'`python3 -m pip install autogluon.text`'
class TextPredictionV1Model(AbstractModel):
nn_model_name = 'text_nn'
def __init__(self, **kwargs):
"""The TextPredictionV1Model.
The features can be a mix of
- text column
- categorical column
- numerical column
The labels can be categorical or numerical.
Parameters
----------
path
The directory to store the modeling outputs.
name
Name of subdirectory inside path where model will be saved.
problem_type
Type of problem that this model will handle.
Valid options: ['binary', 'multiclass', 'regression'].
eval_metric
The evaluation metric.
num_classes
The number of classes.
stopping_metric
The stopping metric.
model
The internal model object.
hyperparameters
The hyperparameters of the model
features
Names of the features.
feature_metadata
The feature metadata.
debug
Whether to turn on debug mode
"""
super().__init__(**kwargs)
self._label_column_name = None
self._numeric_columns = None
self._cat_columns = None
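# Minimal construction sketch (hypothetical): in practice AutoGluon's trainer builds
# this model internally, so the keyword arguments below simply mirror the parameters
# documented in the class docstring and are not a verified public entry point.
#
#   model = TextPredictionV1Model(path="ag_models/", name="text_nn",
#                                 problem_type="binary", eval_metric="roc_auc")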
def _preprocess(self, X: pd.DataFrame, fit=False, **kwargs):
if fit:
self._numeric_columns = self.feature_metadata.get_features(valid_raw_types=[R_INT, R_FLOAT])
self._cat_columns = self.feature_metadata.get_features(valid_raw_types=[R_CATEGORY])
if self._numeric_columns:
X[self._numeric_columns] = X[self._numeric_columns].fillna(-1) # FIXME v0.1: Make this more sophisticated, such as mean.
if self._cat_columns:
X[self._cat_columns] = X[self._cat_columns].astype('object') # FIXME v0.1: Avoid this unnecessary conversion.
# FIXME v0.1: This will crash if NaNs are present at test time.
# X[self._cat_columns] = X[self._cat_columns].fillna(0) # FIXME v0.1: Make this more sophisticated. This is not correct.
return X
def _build_model(self, X_train, y_train, X_val, y_val, hyperparameters):
try:
from autogluon.text.text_prediction.text_prediction \
import ag_text_prediction_params, merge_params, get_column_properties, \
infer_problem_type, infer_eval_stop_log_metrics
from autogluon.text.text_prediction.models.basic_v1 import BertForTextPredictionBasic
except ImportError:
raise ImportError(AG_TEXT_IMPORT_ERROR)
# Decide the name of the label column
if 'label' in X_train.columns:
label_col_id = 0
while True:
self._label_column_name = 'label{}'.format(label_col_id)
if self._label_column_name not in X_train.columns:
break
label_col_id += 1
else:
self._label_column_name = 'label'
if X_val is not None:
concat_feature_df = pd.concat([X_train, X_val])
concat_feature_df.reset_index(drop=True, inplace=True)
concat_label_df = pd.DataFrame({self._label_column_name: | pd.concat([y_train, y_val]) | pandas.concat |
#Copyright 2019 <NAME>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy as np
import pandas as pd
import statsmodels.api as sm
def forwardSelection(X, y, model_type ="linear",elimination_criteria = "aic", varchar_process = "dummy_dropfirst", sl=0.05):
"""
Forward Selection is a function, based on regression models, that returns significant features and selection iterations.\n
Required Libraries: pandas, numpy, statsmodels
Parameters
----------
X : Independent variables (Pandas Dataframe)\n
y : Dependent variable (Pandas Series, Pandas Dataframe)\n
model_type : 'linear' or 'logistic'\n
elimination_criteria : 'aic', 'bic', 'r2', 'adjr2' or None\n
'aic' refers Akaike information criterion\n
'bic' refers Bayesian information criterion\n
'r2' refers R-squared (Only works on linear model type)\n
'r2' refers Adjusted R-squared (Only works on linear model type)\n
varchar_process : 'drop', 'dummy' or 'dummy_dropfirst'\n
'drop' drops varchar features\n
'dummy' creates dummies for all levels of all varchars\n
'dummy_dropfirst' creates dummies for all levels of all varchars, and drops first levels\n
sl : Significance Level (default: 0.05)\n
Returns
-------
columns(list), iteration_logs(str)\n\n
Not Returns a Model
Tested On
---------
Python v3.6.7, Pandas v0.23.4, Numpy v1.15.04, StatModels v0.9.0
See Also
--------
https://en.wikipedia.org/wiki/Stepwise_regression
"""
X = __varcharProcessing__(X,varchar_process = varchar_process)
return __forwardSelectionRaw__(X, y, model_type = model_type,elimination_criteria = elimination_criteria , sl=sl)
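# Usage sketch (hypothetical data; the CSV path and column names are placeholders):
#
#   import pandas as pd
#   df = pd.read_csv("my_data.csv")
#   selected_cols, log = forwardSelection(df.drop(columns=["y"]), df["y"],
#                                         model_type="linear",
#                                         elimination_criteria="aic", sl=0.05)
#   print(selected_cols)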
def backwardSelection(X, y, model_type ="linear", elimination_criteria = "aic", varchar_process = "dummy_dropfirst", sl=0.05):
"""
Backward Selection is a function, based on regression models, that returns significant features and selection iterations.\n
Required Libraries: pandas, numpy, statsmodels
Parameters
----------
X : Independent variables (Pandas Dataframe)\n
y : Dependent variable (Pandas Series, Pandas Dataframe)\n
model_type : 'linear' or 'logistic'\n
elimination_criteria : 'aic', 'bic', 'r2', 'adjr2' or None\n
'aic' refers Akaike information criterion\n
'bic' refers Bayesian information criterion\n
'r2' refers R-squared (Only works on linear model type)\n
'r2' refers Adjusted R-squared (Only works on linear model type)\n
varchar_process : 'drop', 'dummy' or 'dummy_dropfirst'\n
'drop' drops varchar features\n
'dummy' creates dummies for all levels of all varchars\n
'dummy_dropfirst' creates dummies for all levels of all varchars, and drops first levels\n
sl : Significance Level (default: 0.05)\n
Returns
-------
columns(list), iteration_logs(str)\n\n
Does not return a model
Tested On
---------
Python v3.6.7, Pandas v0.23.4, Numpy v1.15.04, StatModels v0.9.0
See Also
--------
https://en.wikipedia.org/wiki/Stepwise_regression
"""
X = __varcharProcessing__(X,varchar_process = varchar_process)
return __backwardSelectionRaw__(X, y, model_type = model_type,elimination_criteria = elimination_criteria , sl=sl)
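# backwardSelection mirrors the forwardSelection sketch above, starting from the full
# feature set and dropping variables instead (hypothetical data again):
#
#   selected_cols, log = backwardSelection(df.drop(columns=["y"]), df["y"],
#                                          model_type="logistic",
#                                          elimination_criteria="bic")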
def __varcharProcessing__(X, varchar_process = "dummy_dropfirst"):
dtypes = X.dtypes
if varchar_process == "drop":
X = X.drop(columns = dtypes[dtypes == np.object].index.tolist())
print("Character Variables (Dropped):", dtypes[dtypes == np.object].index.tolist())
elif varchar_process == "dummy":
X = | pd.get_dummies(X,drop_first=False) | pandas.get_dummies |
import csv
import re
import string
import math
import warnings
import pandas as pd
import numpy as np
import ipywidgets as wg
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mtick
from itertools import product
from scipy.optimize import curve_fit
from IPython.display import display
from platemapping import plate_map as pm
# define custom errors
class DataError(Exception):
pass
class PlateSizeError(Exception):
pass
class DataTypeError(Exception):
pass
# define well plate dimensions
plate_dim = {96:(8, 12), 384:(16, 24)}
# define header names for platemapping module
pm.header_names = {'Well ID': {'dtype':str, 'long':True, 'short_row': False, 'short_col':False},
'Type': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Contents': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Concentration Units':{'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
}
class FA:
"""Class used for the analysis of fluorescence anisotropy data.
:param data_dict: A dictionary containing data frames with pre-processed data and metadata
:type data_dict: dict
:param g_factor: A value of g-factor
:type g_factor: float
:param plate_map: A data frame with platemap containing information about each well
:type plate_map: pandas df"""
def __init__(self, data_dict, g_factor, plate_map):
self.data_dict = data_dict
self.g_factor = g_factor
self.plate_map = plate_map
# create list of all p and s data frames to run some stats
frames = []
for repeat in self.data_dict.values():
metadata, data = repeat.values()
p_channel, s_channel = data.values()
frames.append(p_channel)
frames.append(s_channel)
new = pd.concat(frames, axis=1) # join all p and s data frames into one df
nan = new.size - new.describe().loc['count'].sum() # find sum of 'nan' cells
# create a data frame to store the final fitting parameters
col_names = ['rmin', 'rmin error', 'rmax', 'rmax error', 'lambda', 'Kd', 'Kd error']
p_names = self.plate_map['Protein Name'].dropna().unique() # get list of all protein names
t_names = self.plate_map['Tracer Name'].dropna().unique() # get list of all tracer names
c_names = self.plate_map['Competitor Name'].dropna().unique() # get list of all competitor names
if len(c_names) == 0: # if there are no competitors, replace nan with a string
c_names = ['-']
c_names_print = 'None'
else:
c_names_print = c_names
final_fit = pd.DataFrame(index=pd.MultiIndex.from_product([p_names, t_names, c_names]), columns=col_names)
final_fit["lambda"] = 1 # set the default lambda value as 1
self.final_fit = final_fit
print("Data was uploaded!\n")
print(f"Number of repeats: {len(self.data_dict)} \nValue of g-factor: {self.g_factor} \nOverall number of empty cells is {int(nan)} in {len(frames)} data frames.\nProteins: {p_names}\nTracers: {t_names}\nCompetitors: {c_names_print}\n")
@classmethod
def read_in_envision(cls, data_csv, platemap_csv, data_type='plate', size=384):
"""Reads in the raw data from csv file along with a platemap and constructs the FA class boject.
:param data_csv: File path of the raw data file in .csv format.
:type data_csv: str
:param platemap_csv: File path of the platemap file in .csv format.
:type platemap_csv: str
:param data_type: Format in which the raw data was exported (plate or list), defaults to plate.
:type data_type: str
:param size: Size of the well plate (384 or 96), defaults to 384.
:type size: int
:return: A dictionary containing data frames with pre-processed data, the g-factor and a data frame containing the platemap.
:rtype: dict, float, pandas df """
# ensure the plate size is either 384 or 96
if size not in plate_dim:
raise PlateSizeError('Invalid size of the well plate, should be 384 or 96.')
# try to read in data in plate format
if data_type == 'plate':
try:
data_dict, g_factor = FA._read_in_plate(data_csv, size) # get data dictionary and g factor
plate_map_df = pm.plate_map(platemap_csv, size) # get platemap using the platemapping module
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError, ValueError):
raise DataError(f"Error occured during data read in. Check your file contains data in the 'plate' format and plate size is {size}.")
# try to read in data in list format
if data_type == 'list':
try:
data_dict, g_factor = FA._read_in_list(data_csv, size) # get data dictionary and g factor
plate_map_df = pm.plate_map(platemap_csv, size) # get platemap using the platemapping module
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError):
raise DataError("Error occured during data read in. Check your file contains data in the 'list' format.")
else:
raise DataTypeError(f"'{data_type}' is not one of the two valid data types: plate or list.")
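# Usage sketch (file names below are placeholders, not shipped with the package):
# reading an Envision export in 'plate' format for a 384-well plate would look
# roughly like this.
#
#   fa = FA.read_in_envision(data_csv="raw_export.csv",
#                            platemap_csv="platemap.csv",
#                            data_type="plate", size=384)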
def _read_in_plate(csv_file, size):
"""Reads the raw data file and finds the information needed to extract data. Passes those parameters to pre_process_plate function and executes it.
Returns a tuple of two elements: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A tuple of the dictionary of data frames and the g-factor
:rtype: dict, float """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == []) # list containing indices of all blank rows
if blank_indexes == []: # case for the raw data file having commas instead of blank spaces
blank_indexes = list(index for index, item in enumerate(all_data_lines) if set(item) == {''}) # treats a line filled only with commas (empty strings) as blank
blanks = np.array(blank_indexes) # convert the list of blank indices to a numpy array
read_in_info = [] # list to store the tuples with parameters needed for pandas to read in the csv file
for index, item in enumerate(all_data_lines): # iterate over list with all lines in the csv file
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) == None and re.findall(r"Formula", all_data_lines[index+1][10]) != ['Formula']:
skiprows = index + 9 # Set the skiprows parameter for raw data table
skiprows_meta = index + 1 # Set the skiprows parameter for metadata table
end_of_data = blanks[blanks > skiprows].min() # Calculate the end of data table by finding the smallest blank index after the beginning of data table
read_in_info.append((skiprows, end_of_data - skiprows + 1, skiprows_meta)) # add the skiprows, calculated number of data lines and skiprows for metadata parameters to the list as a tuple
data_format = 'plate1'
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) != None:
skiprows = index + 10 # Set the skiprows parameter for raw data table
skiprows_meta = index + 1 # Set the skiprows parameter for metadata table
end_of_data = blanks[blanks > skiprows].min() # Calculate the end of data table by finding the smallest blank index after the beginning of data table
read_in_info.append((skiprows, end_of_data - skiprows - 1, skiprows_meta)) # add the skiprows, calculated number of data lines and skiprows for metadata parameters to the list as a tuple
data_format = 'plate2'
if item != [] and len(item) > 1 and re.fullmatch(r"G-factor", item[0]):
g_factor = float(item[4])
return FA._pre_process_plate(csv_file, read_in_info, data_format, size), g_factor
def _pre_process_plate(csv_file, read_in_info, data_format, size):
"""Extracts the data and metadata from the csv file, processes it and returns a nested dictionary containing data and metadata for each repeat and channel.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param read_in_info: Tuples with read in parameters for each channel.
:type read_in_info: list
:param data_format: Plate type (plate1 or plate2)
:type data_format: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A dictionary containing data and metadata
:rtype: dict """
data_frames = {} # dictionary to store data frames
counter = 1 # counter incremented by 0.5 to enable alternating labelling of data frames as 'p' or 's'
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # list of well IDs for the pre-processed data frames
for index, item in enumerate(read_in_info): # iterate over all tuples in the list, each tuple contains skiprows, nrows and skiprows_meta for one channel
if data_format == 'plate1': # raw data table does not have row and column names so 'names' parameter passed to omit the last column
raw_data = pd.read_csv(csv_file, sep=',', names=col_numbers, index_col=False, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if data_format == 'plate2': # raw data table has row and column names, so index_col=0 to set the first column as row labels
raw_data = pd.read_csv(csv_file, sep=',', index_col=0, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if len(raw_data.columns) in [13, 25]:
raw_data.drop(raw_data.columns[-1], axis=1, inplace=True) # delete the last column because it is empty
# generate df for metadata (number of rows is always 1) and convert measurement time into datetime object
metadata = pd.read_csv(csv_file, sep=',', engine='python', skiprows=item[2], nrows=1, encoding='utf-8').astype({'Measurement date': 'datetime64[ns]'})
# convert the data frame to a numpy array and reshape it into a single (size x 1) column
data_as_array = np.reshape(raw_data.to_numpy(), (int(size), 1))
if counter % 1 == 0:
new_data = pd.DataFrame(data=data_as_array, index=well_ids, columns=['p']) # generate new 384 (or 96) by 1 data frame with p channel data
data_frames[f'repeat_{int(counter)}'] = {'metadata':metadata, 'data': {'p': new_data, 's':''}} # add p channel data and metadata dfs to dictionary
if counter % 1 != 0:
new_data = pd.DataFrame(data=data_as_array, index=well_ids, columns=['s']) # generate new 384 (or 96) by 1 data frame with s channel data
data_frames[f'repeat_{int(counter-0.5)}']['data']['s'] = new_data # add s channel data to dictionary
counter = counter + 0.5
return data_frames
def _read_in_list(csv_file, size):
"""Reads the raw data file and extracts the data and metadata. Passes the raw data to pre_process_list function and executes it.
Returns a tuple of two elements: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A tuple of dictionary of data frames and the g-factor
:rtype: tuple """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == []) # list containing indexes of all blank rows
if blank_indexes == []: # case for the raw data file having commas instead of blank spaces
blank_indexes = list(index for index, item in enumerate(all_data_lines) if set(item) == {''}) # treats a line filled only with commas (empty strings) as blank
blanks = np.array(blank_indexes) # convert the list of blank indexes to a numpy array
# iterate over all lines to find beggining of the data table ('skiprows') and determine the format of data (list A, B, or C)
for index, item in enumerate(all_data_lines):
if item != [] and len(item) == 1 and re.findall(r"Plate information", item[0]) == ["Plate information"]:
skiprows_meta = index + 1
end_of_metadata = blanks[blanks > skiprows_meta].min() # find the end of metadata by finding the smallest blank index after the beginning of metadata
if item != [] and len(item) >= 2 and re.findall(r"PlateNumber", item[0]) == ['PlateNumber'] and re.findall(r"PlateRepeat", item[1]) == ['PlateRepeat']: # find line number with the beggining of the data
skiprows = index - 1
data_format = 'listA'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and len(item) >= 2 and re.findall(r"Plate", item[0]) == ['Plate'] and re.findall(r"Barcode", item[1]) == ['Barcode']: # find line number with the beggining of the data
skiprows = index
data_format = 'listB'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and len(item) >= 2 and re.findall(r"Plate", item[0]) == ['Plate'] and re.findall(r"Well", item[1]) == ['Well']:
skiprows = index
data_format = 'listC'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and re.fullmatch(r"G-factor", item[0]): # find the g factor
g_factor = float(item[4])
nrows = end_of_data - skiprows - 1 # calculate the length of data table
nrows_meta = end_of_metadata - skiprows_meta - 1 # calculate the length of metadata table (number of rows depends on the number of repeats)
raw_data = pd.read_csv(csv_file, sep=',', engine='python', skiprows=skiprows, nrows=nrows, encoding='utf-8')
raw_metadata = pd.read_csv(csv_file, sep=',', engine='python', skiprows=skiprows_meta, nrows=nrows_meta, encoding='utf-8')
return FA._pre_process_list(raw_data, raw_metadata, data_format, size), g_factor
def _pre_process_list(raw_data, raw_metadata, data_format, size):
"""Extracts the data and metadata for each channel and repeat from the raw data and raw metadata
and returns a nested dictionary containing data and metadata for each repeat and channel.
:param raw_data: Data frame containing raw data
:type raw_data: pandas data frame
:param raw_metadata: Data frame containing raw metadata
:type raw_metadata: pandas data frame
:param data_format: Type of list (listA, listB, or listC)
:type data_format: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A dictionary containing data and metadata
:rtype: dict"""
# remove the '0' from middle position of well numbers (A01 -> A1), done by reassigning the 'Well' column to a Series containing modified well numbers
raw_data['Well'] = raw_data['Well'].apply(lambda x: x[0] + x[2] if x[1] == '0' else x)
data_frames = {} # dictionary to store data frames
repeats = list(raw_metadata['Repeat'].to_numpy()) # generate a list with repeats based on the metadata table, e.g. for 3 repeats -> [1,2,3]
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # list of well IDs for the pre-processed data frames
for index, repeat in enumerate(repeats): # iterate over the number of repeats
if data_format == 'listA':
groupped_data = raw_data.groupby(raw_data.PlateRepeat).get_group(repeat) # group and extract the data by the plate repeat column, i.e. in each iteration get data only for the current repeat
p_groupped = groupped_data.iloc[::3, :] # extract data only for the p channel, i.e. each third row starting from the first row
s_groupped = groupped_data.iloc[1::3, :] # extract data only for the s channel, i.e. each third row starting from the second row
p_raw_data = p_groupped[['Well', 'Signal']] # extract only the two relevant columns
s_raw_data = s_groupped[['Well', 'Signal']] # for each channel
if data_format in ['listB', 'listC']:
# the column naming is different for the first repeat ('Signal'), then it's 'Signal.1', 'Signal.2', etc.
if repeat == 1:
p_raw_data = raw_data[['Well', 'Signal']]
s_raw_data = raw_data[['Well', f'Signal.{repeat}']]
else:
p_raw_data = raw_data[['Well', f'Signal.{repeat + index - 1}']] # the column cotntaining data to be extracted is calculated in each iteration
s_raw_data = raw_data[['Well', f'Signal.{repeat + index}']]
# create an empty df with no columns and indexes matching the plate size
indexes = pd.DataFrame(well_ids, columns=['Wells'])
empty_frame = indexes.set_index('Wells')
p_raw_data.set_index('Well', inplace=True) # set the row indexes as the well numbers
p_raw_data.set_axis(['p'], axis=1, inplace=True) # rename the 'Signal' column to 'p'
p_data = empty_frame.join(p_raw_data) # join the raw data df to an empty frame based on the indexes, assigns 'NaN' to indexes not present in the raw data table
s_raw_data.set_index('Well', inplace=True)
s_raw_data.set_axis(['s'], axis=1, inplace=True)
s_data = empty_frame.join(s_raw_data)
metadata = raw_metadata.iloc[[repeat-1]].astype({'Measurement date': 'datetime64[ns]'}) # extract the row with metadata relevant for each repeat and convert date and time into a datetime object
data_frames[f'repeat_{repeat}'] = {'metadata': metadata, 'data': {'p': p_data, 's': s_data}} # add data frames to the dictionary
return data_frames
def visualise(self, labelby='Type', colorby='Type', title="", cmap='rainbow', blank_yellow=True, scale='lin', dpi=250, export=False):
"""Returns a visual representation of the plate map.
The label and colour for each well can be customised to be a platemap variable, for example 'Type', 'Protein Name', 'Protein Concentration', etc.
It can also be the p or s channel value, calculated anisotropy or intensity, however in such cases the 'colorby' or 'labelby'
parameters must be passed as tuple of two strings specifying the repeat number and variable to display, for example ('repeat_2', 'p_corrected').
:param labelby: Variable to display on the wells, for example 'Type', 'Protein Name', ('repeat_1', 's_corrected'), defaults to 'Type'.
:type labelby: str or tuple of str
:param colorby: Variable to color code by, for example 'Type', 'Contents', 'Protein Concentration', ('repeat_2', 'p'); for non-categorical data the well colour represents the magnitude of the number, defaults to 'Type'.
:type colorby: str or tuple of str
:param title: Sets the title of the figure, defaults to None.
:type title: str
:param cmap: Sets the colormap for the color-coding, defaults to 'rainbow'.
:type cmap: str
:param blank_yellow: Sets the colour-coding of blank wells as yellow, defaults to True.
:type blank_yellow: bool
:param scale: Determines whether data for colour-coding of non-categorical data (e.g. 'p_corrected', 'r_corrected') is scaled linearly ('lin') or logarithmically ('log', works only if data does not contain values less than or equal to 0), defaults to 'lin'.
:type scale: str
:param dpi: Resolution of the exported figure in points per inches, defaults to 250.
:type dpi: int
:param export: If True, save the figure as .png file, defaults to False.
:type export: bool
:return: Visual representation of the plate map.
:rtype: figure
"""
plate_map = self.plate_map # default platemap
size = plate_map.shape[0]
str_format, str_len = None, None # default string format and length (used for categorical types, e.g. 'Type', 'Protein Name', etc.)
noncat_vars = ['p','s','p_corrected','s_corrected','r_raw','r_corrected','i_raw','i_corrected','i_percent'] # list of non-categorical data
scinot_vars = noncat_vars[:-1] + ['Protein Concentration', 'Tracer Concentration', 'Competitor Concentration'] # types that may have to be formatted in scinot (all non-categorical types except of i_percent)
if type(labelby) == tuple: # option for labelling by a variable from a specific repeat
plate_map = self.plate_map.join(self.data_dict[labelby[0]]['data'][labelby[1]]) # data frame containing variable from specified repeat is added to the platemap
labelby = labelby[1] # reassign labelby as the variable name
if labelby == 'i_percent':
str_format = 'percent' # display the values to 1 decimal place
str_len = 3 # determine the length of string to avoid issues with incorrect font scaling
if type(colorby) == tuple: # option for colouring by a variable from a specific repeat
plate_map = self.plate_map.join(self.data_dict[colorby[0]]['data'][colorby[1]]) # data frame containing variable from specified repeat is added to the platemap
colorby = colorby[1] # reassign colorby as the variable name
if labelby in scinot_vars: # check if the data needs to be displayed in scientific notation
if sum((plate_map[labelby] > 1000) | (plate_map[labelby] < 0)) > 0: # format in sci notation if the number is greater than 1000 or less than 0
str_format = 'scinot'
str_len = 8 # determine the length of string to avoid issues with incorrect font scaling
if colorby in noncat_vars:
categorical = False # colours for colour-coding are generated based on normalised data from colorby column
else:
categorical = True # colours for colour-coding are generated based on an array of uniformly spaced numbers representing each category
return pm.visualise(plate_map, title, size, export, cmap, colorby, labelby, dpi, str_format=str_format, str_len=str_len, blank_yellow=blank_yellow, scale=scale, categorical=categorical)
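# Illustrative usage sketch (not part of the API above): assumes an FA instance named 'fa' has already been
# created and, for the corrected-data example, that background_correct() and calc_r_i() have been run so
# that 'r_corrected' exists.
#   fa.visualise()                                                   # colour-code and label wells by 'Type'
#   fa.visualise(labelby='Protein Concentration', colorby='Protein Name')
#   fa.visualise(colorby=('repeat_1', 'r_corrected'), scale='log')   # colour-code by corrected anisotropy of repeat 1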
def invalidate(self, valid=False, **kwargs):
"""Invalidates wells, entire columns and/or rows. Any of the following keyword arguments, or their combination,
can be passed: wells, rows, columns. For example, to invalidate well A1, rows C and D and columns 7 and 8 execute
the following: invalidate(wells='A1', rows=['C','D'], columns=[7,8]).
To validate previously invalidated wells, rows and/or columns, pass the additional 'valid' argument as True.
:param valid: Sets the stipulated well, row or column invalid ('False') or valid ('True'), defaults to False.
:type valid: bool
:param wells: Wells to be invalidated passed as a string or list of strings.
:type wells: str or list of str
:param rows: Rows to be invalidated passed as a string or list of strings.
:type rows: str or list of str
:param columns: Columns to be invalidated passed as an integer or list of integers.
:type columns: int or list of int
"""
# execute the corresponding invalidate function from the platemapping package
if 'wells' in kwargs:
pm.invalidate_wells(platemap=self.plate_map, wells=kwargs['wells'], valid=valid)
if 'rows' in kwargs:
rows = tuple(kwargs['rows']) # convert the rows to tuple because invalidate_rows cannot take in a list
pm.invalidate_rows(platemap=self.plate_map, rows=rows, valid=valid)
if 'columns' in kwargs:
pm.invalidate_cols(platemap=self.plate_map, cols=kwargs['columns'], valid=valid)
if len(kwargs) == 0: # return error if neither of the keyword arguments is passed
raise TypeError('No arguments were passed. Specify the wells, rows and/or columns to be invalidated!')
def background_correct(self):
"""Calculates background corrected values for p and s channel in all repeats.
The background correction is done by subtracting the mean value of blank p (or s) channel intensity for a given
protein, tracer or competitor concentration from each non-blank value of the p (or s) channel intensity for that concentration.
"""
for key, value in self.data_dict.items():
metadata, data = value.values()
# calculate p and s corrected data frame using _background_correct func and add it to data dictionary
self.data_dict[key]['data']['p_corrected'] = FA._background_correct(data['p'], self.plate_map)
self.data_dict[key]['data']['s_corrected'] = FA._background_correct(data['s'], self.plate_map)
print('Background correction was successfully performed!')
def _background_correct(data, platemap):
"""Calculates background corrected p or s channel values for protein/titration or competition experiment.
:param data: Data frame with raw p or s channel values
:type data: pandas df
:param platemap: Data frame with platemap
:type platemap: pandas df
:return: Data frame with background corrected values
:rtype: pandas df
"""
df = platemap.join(data) # join p or s channel data to platemap
df[df.columns[-1]] = df[df.columns[-1]][df['Valid'] == True] # replace 'p' or 's' values with NaN if the well is invalidated
col_name = df.columns[-1] + '_corrected'
no_index = df.reset_index() # move the 'well id' index to df column
columns = ['Type','Protein Name','Protein Concentration','Tracer Name','Tracer Concentration','Competitor Name','Competitor Concentration']
# create a multiindex df to which blank df will be joined
mindex = pd.MultiIndex.from_frame(no_index[columns]) # create multiindex
reindexed = no_index.set_index(mindex).drop(columns, axis=1) # add multiindex to df and drop the columns from which multiindex was created
mean = no_index.groupby(columns, dropna=False).mean().drop('Valid', axis=1).drop('empty', axis=0) # calculate mean for each group of three wells and remove 'Valid' column
mean.rename(columns={mean.columns[-1]: 'Mean'}, inplace=True) # rename the last column to 'Mean' to avoid errors during joining
blank = mean.xs('blank', level=0, drop_level=True) # take a group with only blank wells
reset_idx = blank.reset_index() # move multiindex to df
nans = [col for col in reset_idx.columns if reset_idx[col].dropna().empty] # list of all columns containing only 'nan' values
d = reset_idx.drop(labels=nans, axis=1) # delete all columns containing only 'nan' values
blank2 = d.set_index(pd.MultiIndex.from_frame(d.loc[:,d.columns[:-1]])).drop(d.columns[:-1], axis=1) # multi index to the remaining columns
joined = reindexed.join(blank2, on=list(blank2.index.names)) # join the blank mean data on the indexes only from blank df
joined[col_name] = joined[joined.columns[-2]] - joined[joined.columns[-1]] # calculate background corrected values
jindexed = joined.set_index('index', append=True).reset_index(level=[0,1,2,3,4,5,6]).rename_axis(None) # set index to 'well id' and move multiindex to df columns
return jindexed[[col_name]] # extract and return df with corrected values
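# Worked example of the background correction idea (illustrative numbers only): for a given
# protein/tracer/competitor concentration, the mean blank p channel signal is subtracted from
# every non-blank p channel value at that concentration, e.g.
#   blank p readings: 9400 and 9600  ->  mean blank p = 9500
#   raw p reading of a sample well: 120000  ->  p_corrected = 120000 - 9500 = 110500
# The same subtraction is applied independently to the s channel.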
def calc_r_i(self, correct=True, plot_i=True, thr=80):
"""Calculates anisotropy and fluorescence intensity for each well in all repeats using the raw and background corrected p and s channel data.
The fluorescence intensity (I) and anisotropy (r) are calculated using the following formulas: I = s + (2*g*p) for intensity and
r = (s - (g*p)) / I for anisotropy. Results are stored in the following data frames: i_raw and r_raw (calculated using the uncorrected
p and s channel values) and i_corrected and r_corrected (calculated using the background corrected p and s channel values).
The function also calculates, for each non-blank well, the percentage of the raw intensity that is attributable to background, using the formula:
(raw intensity - corrected intensity) / raw intensity * 100%. If 'plot_i=True', the graph of percentage intensity against the
well ids for all repeats is displayed along with a summary of wells above the threshold (defaults to 80%).
:param correct: Calculate the anisotropy and intensity using the background corrected values of p and s channel data, defaults to True.
:type correct: bool
:param plot_i: Display plots of the percentage intensity against well ids for all repeats, defaults to True.
:type plot_i: bool
:param thr: Percentage intensity above which the wells are included in the summary if plot_i=True, defaults to 80.
:type thr: int
"""
FA.th = thr # assign the threshold value to the class variable so that it can be accessed by functions that are not class methods
for key, value in self.data_dict.items(): # iterate over all repeats
metadata, data = value.values()
# calculate raw intensity and anisotropy using _calc_r_i function and add them to data dictionary
i, r = FA._calc_r_i(data['p'], data['s'], self.g_factor, 'raw')
self.data_dict[key]['data']['i_raw'] = i
self.data_dict[key]['data']['r_raw'] = r
if correct: # calculate intensity and anisotropy using background corrected values of p and s
if 'p_corrected' not in data or 's_corrected' not in data: # check if background subtraction has been performed
raise AttributeError('The corrected anisotropy and intensity can only be calculated after background correction of the raw p and s channel data.')
i_c, r_c = FA._calc_r_i(data['p_corrected'], data['s_corrected'], self.g_factor, 'corrected')
self.data_dict[key]['data']['i_corrected'] = i_c
self.data_dict[key]['data']['r_corrected'] = r_c
# calculate intensity percentage data and add it to data dict
self.data_dict[key]['data']['i_percent'] = FA._calc_i_percent(i, i_c, self.plate_map)
if plot_i: # plot the percentage intensity against the well ids for all repeats
FA._plot_i_percent(self.data_dict, self.plate_map)
else:
print('The fluorescence intensity and anisotropy were successfully calculated!\n')
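# Worked example of the intensity and anisotropy formulas used above (illustrative numbers only),
# with g-factor g = 1.15, p = 100000 and s = 260000:
#   I = s + 2*g*p = 260000 + 2*1.15*100000 = 490000
#   r = (s - g*p) / I = (260000 - 115000) / 490000 ≈ 0.296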
def _calc_r_i(p, s, g, col_suffix):
"""Calculates either anisotropy or intensity and labels the resulting dfs according to the col_suffix parameter
:param p: Data frame with p channel data (can be either raw or background corrected)
:type p: pandas df
:param s: Data frame with s channel data (can be either raw or background corrected)
:type s: pandas df
:param g: G-factor
:type g: float
:param col_suffix: Suffix to add to column name of the resulting intensity or anisotropy data frame, e.g. 'raw', 'corrected'
:type col_suffix: str
:return: Two data frames with calculated anisotropy and intensity values
:rtype: tuple of pandas df"""
p_rn = p.rename(columns={p.columns[0]: s.columns[0]}) # rename the col name in p data frame so that both p and s dfs have the same col names to enable calculation on dfs
i = s + (2 * g * p_rn) # calculate intensity
r = (s - (g * p_rn)) / i # and anisotropy
i_rn = i.rename(columns={i.columns[0]: 'i_' + col_suffix}) # rename the col name using the column suffix argument
r_rn = r.rename(columns={r.columns[0]: 'r_' + col_suffix})
return i_rn, r_rn
def _calc_i_percent(ir, ic, platemap):
"""Calculates the percentage intensity of blank wells compared to non-blank wells.
:param ir: Data frame with corrected intensity
:type ir: pandas df
:param ic: Data frame with raw intensity
:type ic: pandas df
:param platemap: Platemap
:type platemap: pandas df
:return: Data frame with percentage intensity data
:rtype: pandas df"""
ir_rn = ir.rename(columns={ir.columns[0]:ic.columns[0]}) # rename the col name in raw intensity df so that it's the same as in corrected intensity df
percent = (ir_rn - ic) / ir_rn * 100
percent.rename(columns={'i_corrected': 'i_percent'}, inplace=True)
return percent
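# Worked example of the percentage intensity calculation (illustrative numbers only):
#   raw intensity = 490000, background corrected intensity = 441000
#   i_percent = (490000 - 441000) / 490000 * 100 = 10%
# i.e. 10% of the raw signal of that well comes from the background.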
def _plot_i_percent(data_d, platemap):
"""Plots the percentage intensity data against the well ids with a horizontal threshold bar and prints a summary of wells above the
threshold for all non-blank and non-empty cells in all repeats. A single figure with multiple subplots for each repeat is created.
:param data_d: Data dictionary
:type data_d: dict
:param platemap: Platemap needed to subset only the non-blank and non-empty cells
:type platemap: pandas df"""
summary = '' # empty string to which lists of wells to be printed are appended after checking data from each repeat
fig = plt.figure(figsize=(8*int((len(data_d) + 2 - abs(len(data_d) - 2))/2), 4*int( math.ceil((len(data_d))/2)) ), tight_layout=True) # plot a figure with variable size depending on the number subplots (i.e. repeats)
for key, value in data_d.items(): # iterate over all repeats
metadata, data = value.values()
df = platemap.join(data['i_percent'])
df_per = df[(df['Type'] != 'blank') & (df['Type'] != 'empty')] # subset only the non-blank and non-empty cells
plt.subplot(int( math.ceil((len(data_d))/2) ), int( (len(data_d) + 2 - abs(len(data_d) - 2))/2 ), int(key[-1]))
plt.bar(df_per.index, df_per['i_percent']) # plot a bar plot with intensity percentage data
plt.axhline(FA.th, color='red') # plot horizontal line representing the threshold on the bar plot
ax = plt.gca() # get the axis object
ax.set_ylabel('')
ax.set_xlabel('wells')
ax.set_title(f'Repeat {key[-1]}')
ax.yaxis.set_major_formatter(mtick.PercentFormatter()) # set formatting of the y axis as percentage
xlabels = [i if len(i) == 2 and i[1] == '1' else '' for i in list(df_per.index)] # create a list of xticks and xticklabels consisting only of the first well from each row
ax.set_xticks(xlabels)
ax.set_xticklabels(xlabels)
wells = list(df_per[df_per['i_percent'] > FA.th].index) # get a list of well ids above the threshold for this repeat
if wells != []: # append wells above the threshold and the respective repeat number to the string with appropriate formatting
summary = summary + f'\tRepeat {key[-1]}: {str(wells)}\n'
plt.show() # ensure the figure is displayed before printing the summary message
if summary != '': # display the summary of wells above the threshold
print(f'In the following wells the percentage intensity value was above the {FA.th}% threshold:')
print(summary)
else:
print(f'None of the wells has the percentage intensity value above the {FA.th}% threshold.')
def plot_i_percent(self):
"""Disply the graph of percentage intesity of the non blank wells as comapred to the blank corrected wells against well ids for all repeats."""
return FA._plot_i_percent(self.data_dict, self.plate_map)
def calc_mean_r_i(self):
"""Calculates the mean anisotropy and intensity over the number of replicates for each specific protein, tracer
or competitor concentration along with standard deviation and standard error.
This data is required for fitting a logistic curve to anisotropy and intensity plots.
"""
for key, value in self.data_dict.items():
metadata, data = value.values()
# create dictionaries 'r_mean' and 'i_mean' containing mean anisotropy and intensity data frames for each protein-tracer-competitor
data['r_mean'] = FA._calc_mean_r_i(data['r_corrected'], self.plate_map)
data['i_mean'] = FA._calc_mean_r_i(data['i_corrected'], self.plate_map)
# create data frame for storing the fitting params and set lambda value to 1
cols = ['rmin','rmin error', 'rmax', 'rmax error', 'r_EC50', 'r_EC50 error', 'r_hill', 'r_hill error', 'Ifree',
'Ifree error', 'Ibound', 'Ibound error', 'I_EC50', 'I_EC50 error', 'I_hill', 'I_hill error', 'lambda']
data['fit_params'] = pd.DataFrame(index=self.final_fit.index, columns=cols)
data['fit_params']['lambda'] = 1
if set(self.final_fit.index.get_level_values(2).unique()) != {'-'}: # if it is a competition experiment, also create a data frame for storing the IC50 curve fitting params
cols_comp = ['min','min error', 'max', 'max error', 'IC50', 'IC50 error', 'hill', 'hill error']
data['fit_params_com'] = pd.DataFrame(index=self.final_fit.index, columns=cols_comp)
print('The mean anisotropy and intensity were successfully calculated.')
def _calc_mean_r_i(df, plate_map):
"""Calculates mean anisotropy for each protein (or tracer or competitor) concentration value, its standard deviation and standard error.
:param df: Data frame with anisotropy or intensity values
:type df: pandas df
:param plate_map: Plate map data frame
:type plate_map: pandas df
:return: A dictionary of data frames for each unique protein-tracer-competitor
:rtype: dict"""
join = plate_map.join(df) # join anisotropy or intensity df to platemap df
subset = join[(join['Type'] != 'blank') & (join['Type'] != 'empty')] # use only the non-blank and non-empty cells
noidx = subset.reset_index()
columns = ['Protein Name','Protein Concentration','Tracer Name','Tracer Concentration','Competitor Name','Competitor Concentration']
group = noidx.groupby(columns, dropna=False)
mean = group.mean()
std = group.std()
sem = group.sem()
meanr = mean.rename(columns={mean.columns[-1]: 'mean'}).drop('Valid', axis=1) # rename the mean column and remove the 'Valid' column
stdr = std.rename(columns={std.columns[-1]: 'std'}).drop('Valid', axis=1) # rename the std column and remove the 'Valid' column
semr = sem.rename(columns={sem.columns[-1]: 'sem'}).drop('Valid', axis=1) # rename the sem column and remove the 'Valid' column
merge = pd.concat([meanr, stdr, semr], axis=1)
tosplit = merge.reset_index().fillna({'Competitor Name': '-'}) # remove multiindex and in case of protein/tracer titration set competitor name as '-'
split = dict(tuple(tosplit.groupby(['Protein Name', 'Tracer Name', 'Competitor Name'], dropna=False))) # split df based on multiindex so that a new df is created for each unique combination of protein, tracer and competitor
return split
def calc_lambda(self, approve=True):
"""Calculates lambda value for each protein-tracer pair for all repeats and, if approve=True, displays them so that
a single value can be saved for each protein-tracer pair which will be used in subsequent calculations.
:param approve: Display lambda, rmin and rmax values for each protein-tracer pair and for all repeats, defaults to True.
:type approve: bool
"""
w_info = [] # list of tuples with info (rep no, lambda value, etc) needed to generate the widgets
for key, value in self.data_dict.items(): # iterate over all repeats
metadata, data = value.values()
df = data['fit_params'].copy() # create a copy of the fitting params df
df['lambda'] = df['Ibound'] / df['Ifree'] # calculate the lambda value in a copied data frame
if approve == False:
self.data_dict[key]['data']['fit_params']['lambda'] = df['lambda'] # add the lambda values to fitting params df
print('The lambda values were calculated and saved.')
else:
for ptc in list(df.index): # iterate over each protein-tracer pair and create tuples with info needed for generation of widgets
rating = 100 # place for the rating function
info = (key, ptc, rating, df.loc[ptc, "lambda"], data['fit_params'].loc[ptc, "rmin"], data['fit_params'].loc[ptc, "rmax"]) # tuples containing repeat no., protein-tracer names, rating, calculated lambda, rmin and rmax
w_info.append(info)
if approve == True: # execute the function for displaying and handling the widgets
return FA._widget(self.data_dict, w_info, self.final_fit, df)
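# Worked example of the lambda calculation above (illustrative numbers only):
#   Ibound = 900000, Ifree = 1200000  ->  lambda = Ibound / Ifree = 0.75
# A lambda of 1 (the default) is commonly interpreted as the tracer fluorescing equally in the bound and free states.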
def _widget(data_dict, w_info, final_fit, df):
"""Function for generating and displaying the widgets with lambda values.
It generates widgets for each tuple in the w_info list.
:param data_dict: Data dictionary
:type data_dict: dict
:param w_info: A list of tuples containing information needed for the generation of widgets
:type w_info: list
:param final_fit: Data frame with final fitting parameters
:type final_fit: pandas df
:param df: Data frame with calculated lambda values
:type df: pandas df
"""
w_info.sort(key=lambda x: x[1]) # sort the tuples by the protein name so that the widgets are displayed by protein-tracer name
reps = [wg.HTML(f"Repeat {i[0][-1]}") for i in w_info] # list of text widgets with repeat numbers
proteins = [wg.HTML(f"{i[1][0]}") for i in w_info] # list of text widgets with protein names
tracers = [wg.HTML(f"{i[1][1]}") for i in w_info] # list of text widgets with tracer names
#scores = [wg.HTML(f"Score: {i[2]}") for i in w_info]
lambdas = [wg.Checkbox(value=False, description=r"$\lambda$ = %.4f" % (i[3])) for i in w_info] # list of checkbox widgets with lambda values (raw string avoids the invalid '\l' escape warning)
rminmax = [wg.Checkbox(value=False, description="rmin = %.5f, rmax = %.5f" % (i[4], i[5])) for i in w_info] # list of checkbox widgets with rmin and rmax values
v_lambdas = wg.VBox(lambdas) # group all lambda checkbox widgets into a vertical list layout
v_proteins = wg.VBox(proteins) # group all protein name widgets into a vertical list layout
v_tracers = wg.VBox(tracers) # group all tracer name widgets into a vertical list layout
v_reps = wg.VBox(reps) # group all repeat number widgets into a vertical list layout
#v_scores = wg.VBox(scores)
v_rminmax = wg.VBox(rminmax) # group all rmin and rmax checkbox widgets into a vertical list layout
hbox = wg.HBox([v_proteins, v_tracers, v_reps, v_lambdas, v_rminmax]) # arrange the five vertical boxes into one widget box
button = wg.Button(description='Save') # create a button for saving the selected values
print("""Choose the lambda values that will be saved for each protein-tracer pair. \nIf you choose more than one lambda value for a given protein-tracer pair, only the first choice will be saved.\nIf you do not choose any lambda value for a given protein-tracer pair the default value of 1 will remain but you still need to select the rmin and rmax for this pair.""")
display(hbox, button) # display the box with widgets and the button
def btn_eventhandler(obj):
"""Function that is executed when the 'Save' button is clicked. It checks which checkboxes were ticked and
updates the final fit df with the calculated lambda values and/or rmin and rmax values.
Only the first value of lambda for a given protein-tracer will be saved.
"""
added_lambda = [] # protein-tracer pairs for which lambda values were added
added_rminmax = [] # protein-tracer pairs for which rmin and rmax values were added
for i in range(0, len(lambdas)): # iterate over each checkbox widget
index = (proteins[i].value, tracers[i].value, '-') # get the tuple with protein-tracer names
cols = ['rmin','rmin error','rmax','rmax error']
if lambdas[i].value == True: # if the lambda checkbox was ticked, the widget's 'value' attribute is True
if index not in added_lambda: # if lambda for this protein-tracer pair has not yet been added
final_fit.loc[index, "lambda"] = df.loc[index, "lambda"] # add the calculated lambda to the final_fit df
final_fit.loc[index, cols] = data_dict[f'repeat_{reps[i].value[-1]}']['data']['fit_params'].loc[index, cols] #add rmin, rmax and their errors to the final_fit df
added_lambda.append(index)
if rminmax[i].value == True:
if index not in added_lambda and index not in added_rminmax: # if neither lambda nor rmin/rmax for this protein-tracer pair have been added
final_fit.loc[index, cols] = data_dict[f'repeat_{reps[i].value[-1]}']['data']['fit_params'].loc[index, cols]
added_rminmax.append(index)
print('Selected values were saved.')
button.on_click(btn_eventhandler) #link the button event handler function with actual clicking of the button using 'on_click' function
def calc_amount_bound(self):
"""Calculates the amount of fluorescent tracer bound to the protein using the following formula:
L_B = ( (λ*(rmax-r)) / (r-rmin) + 1 )^(-1) * L_T, where r is the measured anisotropy.
The amount bound is calculated as a mean for all replicates for each protein, tracer or competitor concentration
along with its standard deviation and standard error.
"""
ptc_list = list(self.final_fit[self.final_fit['rmin'].isna()].index) # list of indexes for which rmin and rmax are not defined
if ptc_list != []:
raise DataError(f"The 'rmin' and 'rmax' values are not defined for the following proteins and tracers: {ptc_list}.\nUse 'calc_lambda' function or 'set_fitparams' to choose 'rmin' and 'rmax' values.")
for key, value in self.data_dict.items():
metadata, data = value.values()
data['amount_bound'] = FA._calc_amount_bound(data['r_corrected'], self.plate_map, self.final_fit) # create dictionary 'amount_bound' with mean amount-bound data frames for each unique protein-tracer-competitor
print('The amount of fluorescent tracer bound was successfully calculated.')
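# Worked example of the amount bound formula (illustrative numbers only), with lambda = 1,
# rmin = 0.05, rmax = 0.25, measured r = 0.15 and total tracer L_T = 10 nM:
#   L_B = ( (1*(0.25-0.15)) / (0.15-0.05) + 1 )^(-1) * 10 = (1 + 1)^(-1) * 10 = 5 nM
# i.e. an anisotropy halfway between rmin and rmax corresponds to half the tracer being bound (when lambda = 1).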
def _calc_amount_bound(df, platemap, final_fit):
"""Calculates the amount from anisotropy data.
:param df: Data frame with anisotropy values
:type df: pandas df
:param platemap: Plate map data frame
:type platemap: pandas df
:return: A dictionary of data frames for each unique protein-tracer-competitor
:rtype: dict
"""
join_pm = platemap.join(df) # join corrected anisotropy df with the platemap df
subset = join_pm[(join_pm['Type'] != 'blank') & (join_pm['Type'] != 'empty')].fillna({'Competitor Name': '-'}) # take only non-blank and non-empty wells, in case of protein/tracer titration set competitor name as '-'
re_idx = subset.set_index(pd.MultiIndex.from_frame(subset[['Protein Name','Tracer Name','Competitor Name']])).rename_axis([None,None,None]) # replace the index with multiindex (protein-tracer-competitor) and remove its names
join_ff = re_idx.join(final_fit[['rmin','rmax','lambda']]) # join the final_fit df to the anisotropy df on multiindex
# calculate the amount bound (all parameters needed are already in the data frame)
join_ff['mean'] = (((((join_ff["lambda"] * (join_ff['rmax']-join_ff['r_corrected'])) / (join_ff['r_corrected'] - join_ff['rmin']))) +1) **(-1)) * join_ff['Tracer Concentration']
# remove the redundant columns and set dtype of 'amount' column as float to avoid pandas DataError
drop = join_ff.drop(['r_corrected','Valid', 'rmin', 'rmax', "lambda"], axis=1).astype({'mean': 'float64'})
columns = ['Protein Name','Protein Concentration','Tracer Name','Tracer Concentration','Competitor Name','Competitor Concentration']
group = drop.groupby(columns, dropna=False)
mean = group.mean()
std = group.std()
sem = group.sem()
stdr = std.rename(columns={std.columns[-1]: 'std'}) # rename column to 'std'
semr = sem.rename(columns={sem.columns[-1]: 'sem'}) # rename column to 'sem'
merge = pd.concat([mean, stdr, semr], axis=1) # merge the amount, std and sem data frames into one df
tosplit = merge.reset_index().fillna({'Competitor Name': '-'}) # remove multiindex, in case of protein/tracer titration set competitor name as '-'
split = dict(tuple(tosplit.groupby(['Protein Name', 'Tracer Name', 'Competitor Name'], dropna=False))) # dictionary with a data frame for each unique protein-tracer-competitor combination
return split
def _calc_Ki(ptc, params_df, platemap, final_fit):
"""Calculates Ki, Ki* (based on the actual protein concentration determined from the ic50 plot) and their errors.
:param ptc: Tuple of 3 strings: protein, tracer and competitor name for which the Ki is calculated
:type ptc: tuple
:param params_df: Data frame with ic50 fitting parameters
:type params_df: pandas df
:param platemap: Platemap data frame
:type platemap: pandas df
:return: Values of Ki, Ki*, Pt and their errors
:rtype: tuple
"""
IC50, IC50_err, pmax = tuple(params_df.loc[ptc, ['IC50','IC50 error','max']]) # get IC50 and the upper asymptote of IC50 plot
Kd, Kd_err = tuple(final_fit.loc[ptc, ['Kd','Kd error']]) # get Kd and its error
LT = float(platemap['Tracer Concentration'].dropna().unique())
PT = float(platemap['Protein Concentration'].dropna().unique())
PT_2 = ( (Kd*pmax) / (LT-pmax) ) + pmax # protein conc calculated based on upper asymptote of IC50 plot
Kd_arr = np.random.normal(Kd, Kd_err, 100000) # simulated Kd values based on the real Kd and its error
IC50_arr = np.random.normal(IC50, IC50_err, 100000) # simulated IC50 values based on the real IC50 and its error
def _calc_Ki_val(Kd, LT, PT, IC50):
"""Calculates Ki value"""
P0 = ( -(Kd+LT-PT) + np.sqrt( ((Kd+LT-PT)**2) - (4*PT*LT) ) ) / 2
L50 = LT - ( (PT-P0) / 2 )
I50 = IC50 - PT + ( (PT-P0) / 2 ) * (1 + (Kd/L50) )
Ki = I50 / ( (L50/Kd) + (P0/Kd) + 1 )
return Ki
Ki = _calc_Ki_val(Kd, LT, PT, IC50) # calculate Ki
Ki_err = np.std(_calc_Ki_val(Kd_arr, LT, PT, IC50_arr)) # calculate Ki error as std of Ki values generated around the real Ki
Ki_2 = _calc_Ki_val(Kd, LT, PT_2, IC50) # calculate Ki*
Ki_2_err = np.std(_calc_Ki_val(Kd_arr, LT, PT_2, IC50_arr)) # calculate Ki* error as std of Ki* values generated around Ki*
return Ki, Ki_err, Ki_2, Ki_2_err, PT, PT_2
##### Curve fitting functions #####
def _EC50(pc, rmin, rmax, EC50, hill):
"""Function for fitting a curve to the plot of anisotropy (or intensity) against protein/tracer
concentration, where pc is protein (or tracer) concentration, rmin is the lower asymptote, rmax is the upper asymptote,
EC50 is midpoint of transition (pc at point of inflection), hill is the slope
"""
return (rmin - rmax) / (1 + (pc/EC50)**hill) + rmax
def _EC50_com(pc, rmin, rmax, EC50, hill):
"""Function for fitting a curve to the plot of anisotropy against competitor concentration, for fitting a curve
to the plot of amount of fluorescent tracer bound to the target protein against competitor concentration
where pc is competitor concentration, rmin is the lower asymptote, rmax is the upper asymptote,
EC50 is midpoint of transition (pc at point of inflection), hill is the slope
"""
return (rmax - rmin) / (1 + (pc/EC50)**hill) + rmin
def _LB(LT, PT, Kd):
"""Function for fitting a curve to the plot of concentration of fluorescent tracer bound to the target protein
against protein or tracer concentration.
"""
return ( (LT+PT+Kd) - np.sqrt( ( ((LT+PT+Kd)**2) - (4*LT*PT) ) ) ) / 2
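# Minimal sketch of how these model functions can be fitted with SciPy (illustrative only; the class
# methods below wrap this via _curve_fit). The concentration and anisotropy arrays are made-up example
# data, not values from any real experiment.
#   import numpy as np
#   from scipy.optimize import curve_fit
#   conc = np.array([1, 3, 10, 30, 100, 300, 1000])                  # titrated concentration
#   r    = np.array([0.06, 0.07, 0.10, 0.15, 0.21, 0.24, 0.25])      # mean anisotropy
#   popt, pcov = curve_fit(FA._EC50, conc, r, p0=[0.05, 0.25, 30, 1])  # rmin, rmax, EC50, hill
#   perr = np.sqrt(np.diag(pcov))                                     # standard errors of the fitted parameters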
def _init_params(df, t_type):
"""Estimates initial parameters for the _EC50 function that are passed to the curve fitting function
:param df: Data frame containing mean values of anisotropy or intensity
:type df: pandas df
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:return: List with estimated parameters of min, max and EC50; hill is assumed to be 1
:rtype: list
"""
rmin = df['mean'].min()
rmax = df['mean'].max()
mid = (rmax + rmin) / 2
mid_idx = df['mean'].sub(mid).abs().argmin()
EC50 = df.iloc[mid_idx][f'{t_type} Concentration']
init_params = [rmin, rmax, EC50, 1]
return init_params
def _curve_fit(func, df, t_type, var, **kwargs):
"""Fits a curve to the plot of specified variable against protein (or tracer) concentration using pre-defined funcion.
:param func: Funcion describing the curve to be fitted to data points
:type func: func
:param df: Data frame containing mean values of anisotropy, intensity or amount bound and their errors (std and sem).
:type df: pandas df
:param t_type: Type of titration ('Protein', 'Tracer', or 'Competitor'), determines order of parameters in returned list
:type t_type: str
:param **kwargs: Keyword arguments that can be passed into the scipy curve_fit function
:param var: Type of fitting performed, either logistic ('log') or single site ('ssb').
:type var: str
:return: A list of fitting parameters along with their error in proper order so that it can be added to the fitting params data frame
:rtype: list
"""
drop = df[df[f'{t_type} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaN mean values from data fitting
if 'sigma' in kwargs:
sigma = drop[kwargs.pop('sigma')] # take the column with std or sem error data as sigma
else:
sigma = None
if 'p0' not in kwargs and var == 'log': # user did not pass their initial guess
p0 = FA._init_params(drop, t_type) # use _init_params function to get the initial guess
elif 'p0' in kwargs:
p0 = kwargs.pop('p0') # remove p0 from kwargs and assign to p0 argument so that there is only one p0 arg passed to curve fit
else:
p0 = None
popt, pcov = curve_fit(func, drop[f'{t_type} Concentration'], drop['mean'], p0=p0, sigma=sigma, **kwargs)
perr = np.sqrt(np.diag(pcov)) # calculate the error of the fitting params
if var == 'log':
all_params = np.insert(popt, obj=[1,2,3,4], values=perr) # insert the errors after the respective fitting parameter value
if var == 'ssb':
all_params = np.insert(popt[::-1], obj=[1,2], values=perr[::-1]) # rearrange the order of parametrs in the array
return list(all_params)
def logistic_fit(self, prot=['all'], trac=['all'], rep=['all'], var='both', **kwargs):
"""Fits a logistic curve to the plot of anisotropy (or intensity) against protein or tracer concentration.
Returns the fitting parameters with associated errors for each repeat that are stored in the fitting_params data frame.
:param prot: List of protein names for which fitting is performed, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which fitting is performed, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which fitting is performed, defaults to ['all'].
:type rep: list of ints
:param var: A variable for which fitting is performed (either 'r' for anisotropy or 'i' for intensity), defaults to 'both'.
:type var: str
:param **kwargs: Keyword arguments that can be passed to the SciPy curve_fit function.
"""
# get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, ['all'], rep)
errors = [] # list for storing the details of errors due to failed fitting
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
for rep, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
for ptc in ptc_list: # iterate over all protein-tracer pairs
if var == 'r' or var == 'both':
try: # try fitting the curve to anisotropy data
cols = ['rmin','rmin error','rmax', 'rmax error', 'r_EC50', 'r_EC50 error', 'r_hill', 'r_hill error']
r_mean = data['r_mean'][ptc] # extract the df with mean anisotropy for a given protein-tracer pair
params_r = FA._curve_fit(FA._EC50, r_mean, t_type, 'log', **kwargs) # fit the data to logistic curve using the initial parameteers
data['fit_params'].loc[ptc, cols] = params_r # add the fitting parameters to the respective df
except RuntimeError as e: # if fitting fails, add details about the error to the errors list and proceed to intensity data fitting
r_error_info = (rep, 'r', ptc, e)
errors.append(r_error_info)
if var == 'i' or var == 'both':
try: # try fitting the curve to intensity data
cols = ['Ifree', 'Ifree error', 'Ibound','Ibound error', 'I_EC50', 'I_EC50 error', 'I_hill', 'I_hill error']
i_mean = data['i_mean'][ptc] # extract the df with i mean for a given protein-tracer pair
params_i = FA._curve_fit(FA._EC50, i_mean, t_type, 'log', **kwargs)
data['fit_params'].loc[ptc, cols] = params_i
except RuntimeError as e: # if fitting fails, add details about the error to the errors list and proceed to the next protein-tracer pair
i_error_info = (rep, 'i', ptc, e)
errors.append(i_error_info)
if errors != []: # raise a warning if fitting failed for any protein-tracer pair
warnings.warn(f"The curve fitting failed in the following cases:\n\n{errors}\n\nTry passing additional keyword arguments to the fitting function.", RuntimeWarning)
else:
print('The logistic curve fitting was successfully performed.')
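# Illustrative usage sketch (assumes an FA instance named 'fa' on which calc_mean_r_i() has already been
# run; the protein name shown is a placeholder):
#   fa.logistic_fit()                                          # fit both anisotropy and intensity for all pairs and repeats
#   fa.logistic_fit(prot=['Protein A'], var='r', sigma='sem')  # weighted fit of anisotropy only
#   fa.logistic_fit(p0=[0.05, 0.25, 100, 1], maxfev=5000)      # pass an initial guess and extra curve_fit kwargs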
def logisitc_fit_com(self, prot=['all'], trac=['all'], rep=['all'], com=['all'], **kwargs):
"""Fits a logistic curve to the plot of anisotropy against competitor concentration. Returns the fitting
parameters with associated errors for each repeat that are stored in the fitting_params data frame.
:param prot: List of protein names for which fitting is performed, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which fitting is performed, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which fitting is performed, defaults to ['all'].
:type rep: list of ints
:param com: List of competitor names for which fitting is performed, defaults to ['all'].
:type com: list or list of str
:param **kwargs: Keyword arguments that can be passed to the SciPy curve_fit function.
"""
# get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, com, rep)
errors = [] # list for storing the details of errors due to failed fitting
for rep, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
for ptc in ptc_list: # iterate over all protein-tracer pairs
try: # try fitting the curve to anisotropy data
cols = ['rmin','rmin error','rmax', 'rmax error','r_IC50', 'r_IC50 error', 'r_hill', 'r_hill error']
r_mean = data['r_mean'][ptc] # extract the df with mean anisotropy for a given protein-tracer pair
params_r = FA._curve_fit(FA._EC50_com, r_mean, 'Competitor', 'log', **kwargs)
data['fit_params'].rename(columns={'r_EC50':'r_IC50', 'r_EC50 error':'r_IC50 error'}, inplace=True) # rename the EC50 columns to IC50 (without 'inplace=True' the rename has no effect)
data['fit_params'].loc[ptc, cols] = params_r # add the fitting parameters to the respective df
except RuntimeError as e: # if fitting fails, add details about the error to the errors list and proceed to the next protein-tracer pair
r_error_info = (rep, 'r', ptc, e)
errors.append(r_error_info)
if errors != []: # raise a warning if fitting failed for any protein-tracer pair
warnings.warn(f"The curve fitting failed in the following cases:\n\n{errors}\n\nTry passing additional keyword arguments to the fitting function.", RuntimeWarning)
else:
print('The logistic curve fitting was successfully performed.')
def single_site_fit(self, prot=['all'], trac=['all'], rep=['all'], **kwargs):
"""Fits a curve to the plot of concentration of fluorescent tracer bound to the target protein against the
protein (or tracer) concentration. The resulting fitting parameters are stored in the final_fit data frame.
:param prot: List of protein names for which fitting is performed, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which fitting is performed, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which fitting is performed, defaults to ['all'].
:type rep: list of ints
:param **kwargs: Keyword arguments that can be passed to the SciPy curve_fit function.
"""
# get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, ['all'], rep)
errors = [] # list for storing the details of errors due to failed fitting
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
for rep, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
for ptc in ptc_list: # iterate over all protein-tracer pairs
try: # try fitting the curve to anisotropy data
amount_b = data['amount_bound'][ptc] # extract the df with mean amount bound for a given protein-tracer pair
params = FA._curve_fit(FA._LB, amount_b, t_type, 'ssb', **kwargs)
if t_type == 'Protein':
self.final_fit.loc[ptc, ['Kd', 'Kd error', 'LT', 'LT error']] = params
if t_type == 'Tracer':
self.final_fit.loc[ptc, ['Kd', 'Kd error', 'PT', 'PT error']] = params
except RuntimeError as e:
error_info = (rep, ptc, e)
errors.append(error_info)
if errors != []: # raise a warning if fitting failed for any protein-tracer pair
warnings.warn(f"The curve fitting failed in the following cases:\n\n{errors}\n\nTry passing additional keyword arguments to the fitting function", RuntimeWarning)
else:
print('The single site curve fitting was successfully performed.')
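# Illustrative usage sketch (assumes an FA instance named 'fa' on which calc_amount_bound() has already
# been run; protein/tracer names are placeholders):
#   fa.single_site_fit()                                              # fit Kd for all protein-tracer pairs and repeats
#   fa.single_site_fit(prot=['Protein A'], trac=['Tracer X'], sigma='std')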
def single_site_fit_com(self, prot=['all'], trac=['all'], com=['all'], rep=['all'], **kwargs):
"""Fits a curve to the plot of concentration of fluorescent tracer bound to the target protein against the
competitor concentration. The resulting fitting parameters are stored in the fitting_params_com data frame.
:param prot: List of protein names for which fitting is performed, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which fitting is performed, defaults to ['all'].
:type trac: list of str
:param com: List of competitor names for which fitting is performed, defaults to ['all'].
:type com: list or list of str
:param rep: List of repeat numbers for which fitting is performed, defaults to ['all'].
:type rep: list of ints
:param **kwargs: Keyword arguments that can be passed to the SciPy curve_fit function.
"""
# get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, com, rep)
errors = [] # list for storing the details of errors due to failed fitting
for rep, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
for ptc in ptc_list: # iterate over all protein-tracer pairs
try: # try fitting the curve to anisotropy data
amount_b = data['amount_bound'][ptc] # extract the df with mean amount bound for a given protein-tracer pair
params = FA._curve_fit(FA._EC50_com, amount_b, 'Competitor', 'log', **kwargs)
data['fit_params_com'].loc[ptc, :] = params
except RuntimeError as e:
error_info = (rep, ptc, e)
errors.append(error_info)
if errors != []: # raise a warning if fitting failed for any protein-tracer pair
warnings.warn(f"The curve fitting failed in the following cases:\n\n{errors}\n\nTry passing additional keyword arguments to the fitting function", RuntimeWarning)
else:
print('The single site curve fitting was successfully performed.')
##### Anisotropy and binding constant plotting functions #####
def _get_items_to_plot(data_d, platemap, prot, trac, com, rep):
"""Creates a list of tuples with protein-tracer-competitor names based on the 'prot', 'trac' and 'com'
parameters and a subset of data_dict based on the 'rep' parameter.
"""
if prot[0] == 'all' and trac[0] == 'all' and com[0] == 'all': # all proteins and all tracers
ptc_list = list(data_d['repeat_1']['data']['r_mean'].keys()) # 'r_mean' dict contains all protein-tracer names as dict keys
else:
if com[0] == 'all':
com = list(platemap['Competitor Name'].dropna().unique())
if com == []:
com = ['-']
if trac[0] == 'all':
trac = list(platemap['Tracer Name'].dropna().unique())
if prot[0] == 'all':
prot = list(platemap['Protein Name'].dropna().unique())
ptc_list = [item for item in product(prot, trac, com)]
# define a data dictionary to iterate through based on the 'rep' parameter:
if rep[0] == 'all': # for all repeats use the whole data_dict
data_dict = data_d
else: # for specific repeats use the subset of data_dict containg only the repeats specified in 'rep' parameter
data_dict = {key: value for key, value in data_d.items() if int(key[-1]) in rep}
return data_dict, ptc_list
def _vir_data(df, t_type, samples):
"""Returns a set of data points (x-axis data) evenly spaced on a logarythmic scale to be used for plotting
the curves instead of the real concentration data to make the curve appear smoother.
:param df: Data frame containing concentration data
:type df: pandas df
:param t_type: Type of concentration to be used: Protein, Tracer, Competitor.
:type t_type: str
:param samples: Number of data points to generate
:type samples: int
:return: Array of concentration values evenly spaced between minimal and maximal concentration
:rtype: numpy array
"""
minc = df[f'{t_type} Concentration'].min()
maxc = df[f'{t_type} Concentration'].max()
return np.logspace(np.log10(minc),np.log10(maxc), samples)
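# Worked example of the virtual x-axis data (illustrative): for concentrations spanning 0.001 to 1000,
# np.logspace(np.log10(0.001), np.log10(1000), 7) gives [0.001, 0.01, 0.1, 1, 10, 100, 1000],
# i.e. points evenly spaced on a logarithmic scale between the minimal and maximal concentration.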
def _plot_ani(data_df, params_df, ptc, t_type, fig, axs, err, var, rep, unit, exp, disp, leg, dpi):
"""General function for plotting the anisotropy and intensity and saving the figures. Returns a single figure.
:param data_df: Data frame with mean values of anisotropy or intensity and their associated errors
:type data_df: pandas df
:params_df: Data frame with fitting parameters
:type params_df: pandas df
:param ptc: protein-tracer-competitor for which the graph is to be generated
:type ptc: tuple
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param fig: Figure on which the data is plotted, needed for saving the figure as png file
:type fig: matplotlib Figure
:param axs: Indexed axis object on which the data is to be plotted, (e.g. axs[0, 1])
:type axs: matplotlib AxesSubplot
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param var: Variable for which the plot is to be generated ('r' or 'i')
:type var: str
:param rep: Repeat number for labelling of the graph
:type rep: 'str'
:param unit: Concentration units to be displayed on the plots
:type unit: str
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param disp: Determines whether the figure will be displayed after plotting, default True
:type disp: bool
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
if var == 'r': # define the parameters, legend text and legend coordinates characteristic for anisotropy data
params = tuple(params_df.loc[ptc, ['rmin','rmax','r_EC50','r_hill']]) # fit params for curve plotting
text = "$r_{min}$ = %.4f \u00B1 %.4f\n$r_{max}$ = %.4f \u00B1 %.4f\n$hill$ = %.2f \u00B1 %.2f\n" % tuple(params_df.loc[ptc, ['rmin',
'rmin error','rmax','rmax error','r_hill', 'r_hill error']])
EC50, EC50e = tuple(params_df.loc[ptc, ['r_EC50','r_EC50 error']])
text_final = text + '$EC_{50}$ = ' + f'{EC50:,.2f} \u00B1 {EC50e:,.2f}'
ylabel = 'Anisotropy'
if var == 'i': # define the parameters, legend text and legend coordinates characteristic for intensity data
params = tuple(params_df.loc[ptc, ['Ifree','Ibound','I_EC50','I_hill']]) # fit params for curve plotting
If, Ife, Ib, Ibe, EC50, EC50e = tuple(params_df.loc[ptc, ['Ifree','Ifree error', 'Ibound', 'Ibound error','I_EC50','I_EC50 error']]) # use the intensity EC50 in the intensity legend
text = "$hill$ = %.2f \u00B1 %.2f\n" % tuple(params_df.loc[ptc, ['I_hill', 'I_hill error']])
text_final = '$I_{free}$ = ' + f'{If:,.1f} \u00B1 {Ife:,.1f}\n' + '$I_{bound}$ = ' + f'{Ib:,.1f} \u00B1 {Ibe:,.1f}\n' + text + '$EC_{50}$ = ' + f'{EC50:,.2f} \u00B1 {EC50e:,.2f}'
ylabel = 'Intensity'
drop = data_df[data_df[f'{t_type[0]} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaNs from plotting
axs.errorbar(drop[f'{t_type[0]} Concentration'], drop['mean'], yerr=drop[err], color='black', fmt='o', capsize=3, marker='s')
axs.set_xscale('log')
axs.set_ylabel(ylabel)
axs.set_xlabel(f'[{ptc[int(t_type[1])]}] ({unit})')
vir_data = FA._vir_data(drop, t_type[0], 200) # x-axis data for curve plotting
axs.plot(vir_data, FA._EC50(vir_data, *params), color='blue')
if leg == True: # display title and legend with fitting parameters
axs.set_title(f'Protein: {ptc[0]}, Tracer: {ptc[1]}')
axs.legend([f'logistic fitted curve\n{text_final}'], frameon=False, fontsize=11)
if exp == True: # save figures in the same directory as the notebook
fig.savefig(f"rep_{rep[-1]}_{var}_{str(ptc[0])}_{str(ptc[1])}.png", dpi=dpi)
if type(exp) == str: # save figures in the user defined directory
fig.savefig(f"{exp}rep_{rep[-1]}_{var}_{str(ptc[0])}_{str(ptc[1])}.png", dpi=dpi)
if disp == False: # if function is called by save_ani_figs then the plotted figures are not displayed
plt.close(fig)
def plot_ani(self, prot=['all'], trac=['all'], rep=['all'], err='std', legend=True):
"""Plots anisotropy and intensity against protein or tracer concentration with a fitted logistic curve
for specified repeats and protein-tracer pairs. A separate figure for each repeat is created with anisotropy
and intensity graphs for all specified proteins and tracers arranged in two columns.
:param prot: List of protein names for which the graphs are created, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs are created, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs are created, defaults to ['all'].
:type rep: list of int
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem'), defaults to 'std'.
:type err: str
"""
# get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, ['all'], rep)
unit = str(self.plate_map['Concentration Units'].dropna().unique()[0]) # concentration units
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = ('Protein', 0)
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = ('Tracer', 1)
for key, value in data_dict.items(): # iterate over all repeats and create a separate figure for each repeat
metadata, data = value.values()
fig, axs = plt.subplots(len(ptc_list), 2, figsize=(2*6.4, len(ptc_list)*4.8), tight_layout=True) # grid for subplots has two columns and a variable number of rows, figsize automatically scales up
fit_params = data['fit_params']
if len(data_dict) > 1: # do not display info about repeat number if there is only one repeat
fig.suptitle(f"Repeat {key[-1]}", fontsize=16)
for idx, ptc in enumerate(ptc_list): # for each protein-tracer pair plot two graphs: anisotropy and intensity
r_data_df, i_data_df = data['r_mean'][ptc], data['i_mean'][ptc] # extract the df with anisotropy and intensity
if len(ptc_list) == 1: # for only one protein-tracer pair the subplot grid is 1-dimensional
FA._plot_ani(r_data_df, fit_params, ptc, t_type, fig, axs[0], err, 'r', key, unit, exp=False, disp=True, leg=legend, dpi=250)
FA._plot_ani(i_data_df, fit_params, ptc, t_type, fig, axs[1], err, 'i', key, unit, exp=False, disp=True, leg=legend, dpi=250)
else: # for more than one protein-tracer pair the subplot grid is 2-dimensional
FA._plot_ani(r_data_df, fit_params, ptc, t_type, fig, axs[idx,0], err, 'r', key, unit, exp=False, disp=True, leg=legend, dpi=250)
FA._plot_ani(i_data_df, fit_params, ptc, t_type, fig, axs[idx,1], err, 'i', key, unit, exp=False, disp=True, leg=legend, dpi=250)
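# Illustrative usage sketch (assumes an FA instance named 'fa' on which logistic_fit() has already been
# run; the protein name shown is a placeholder):
#   fa.plot_ani()                                                    # anisotropy and intensity plots for all pairs and repeats
#   fa.plot_ani(prot=['Protein A'], rep=[1], err='sem', legend=False)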
def _plot_ani_com(data_df, params_df, ptc, err, rep, unit, exp, leg, dpi):
"""General function for plotting the anisotropy figures for competition experiments. Returns a single figure
:param data_df: Data frame with mean values of anisotropy or intensity and their associated errors
:type data_df: pandas df
:params_df: Data frame with fitting parameters
:type params_df: pandas df
:param ptc: protein-tracer pair for which the graph is to be generated
:type ptc: tuple
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param rep: Repeat number for labelling of the graph
:type rep: 'str'
:param unit: Concentration units to be displayed on the plots
:type unit: str
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
# define the parameters, legend text and legend coordinates characteristic for anisotropy data
fig, axs = plt.subplots(1, 1, figsize=(6.4, 4.8), tight_layout=True)
params = tuple(params_df.loc[ptc, ['rmin', 'rmax','r_IC50','r_hill']]) # fit params for curve plotting
text = "$r_{min}$ = %.4f \u00B1 %.4f\n$r_{max}$ = %.4f \u00B1 %.4f\n$hill$ = %.2f \u00B1 %.2f\n" % tuple(params_df.loc[ptc, ['rmin',
'rmin error','rmax','rmax error','r_hill', 'r_hill error']])
IC50, IC50e = tuple(params_df.loc[ptc, ['r_IC50','r_IC50 error']])
text_final = text + '$IC_{50}$ = ' + f'{IC50:,.1f} \u00B1 {IC50e:,.1f}'
drop = data_df[data_df['Competitor Concentration'] != 0].dropna(subset=['mean']) # exclude the competitor concentration = 0 point and any NaNs from plotting
axs.errorbar(drop['Competitor Concentration'], drop['mean'], yerr=drop[err], color='black', fmt='o', capsize=3, marker='s')
axs.set_xscale('log')
axs.set_ylabel('Anisotropy')
axs.set_xlabel(f'[{ptc[2]}] ({unit})')
vir_data = FA._vir_data(drop, 'Competitor', 200) # x-axis data for curve plotting
axs.plot(vir_data, FA._EC50_com(vir_data, *params), color='blue')
if leg == True: # display title and legend with fitting parameters on the graph
axs.set_title(f'{ptc[0]}, {ptc[1]}, {ptc[2]}')
axs.legend([f'logistic fitted curve\n{text_final}'], frameon=False, fontsize=11)
if exp == True: # save figures in the same directory as the notebook
fig.savefig(f"rep_{rep[-1]}_r_{str(ptc[0])}_{str(ptc[1])}_{str(ptc[2])}.png", dpi=dpi)
if type(exp) == str: # save figures in the user defined directory
fig.savefig(f"{exp}rep_{rep[-1]}_r_{str(ptc[0])}_{str(ptc[1])}_{str(ptc[2])}.png", dpi=dpi)
def plot_ani_com(self, prot=['all'], trac=['all'], com=['all'], rep=['all'], err='std', legend=True, export=False, dpi=250):
"""Plots anisotropy against competitor concentration with a fitted logistic curve for specified repeats and
proteins, tracers and competitors.
:param prot: List of protein names for which the graphs are created, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs are created, defaults to ['all'].
:type trac: list of str
:param com: List of competitor names for which the graphs are created, defaults to ['all'].
:type com: list or list of str
:param rep: List of repeat numbers for which the graphs are created, defaults to ['all'].
:type rep: list of int
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem'), defaults to 'std'.
:type err: str
:param export: Save the figures (True) in the same directory as this notebook, or provide a path (str) to a specified directory, defaults to False.
:type export: bool or 'str'
:param legend: Display legend and title on the figures, defaults to True.
:type legend: bool
:param dpi: Resolution of the figure in points per inch, defaults to 250.
:type dpi: int
"""
warnings.filterwarnings("ignore")
#get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, com, rep)
unit = str(self.plate_map['Concentration Units'].dropna().unique()[0]) # concentration units
for key, value in data_dict.items(): # iterate over all repeats
metadata, data = value.values()
fit_params = data['fit_params']
for idx, ptc in enumerate(ptc_list): # for each protein-tracer-competitor combination plot the anisotropy graph
r_data_df = data['r_mean'][ptc] # extract the df with mean anisotropy
FA._plot_ani_com(r_data_df, fit_params, ptc, err, key, unit, exp=export, leg=legend, dpi=dpi)
def save_ani_figs(self, prot=['all'], trac=['all'], rep=['all'], var='both', path='', err='std', legend=False, dpi=250):
"""Saves single figures of anisotropy and intensity for specified repeats and protein-tracer pairs in the same
directory as this notebook or in user defined directory if the path is provided.
:param prot: List of protein names for which the graphs are exported, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs are exported, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs are exported, defaults to ['all'].
:type rep: list of ints
:param var: A variable for which the graphs are exported, either 'r' for anisotropy or 'i' for intensity, defaults to 'both'.
:type var: str
:param path: A path to directory in which the figures are saved, defaults to '' (the same directory as this Jupyter Notebook).
:type path: str
:param err: Type of error data displayed as error bars, either 'std' or 'sem', defaults to 'std'.
:type err: str
:param legend: Display legend and title on the figures, defaults to False.
:type legend: bool
:param dpi: Resolution of the figure in points per inch, defaults to 250.
:type dpi: int
"""
# get data_dict and a list of protein-tracer names
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, ['all'], rep)
unit = str(self.plate_map['Concentration Units'].dropna().unique()[0]) # concentration units
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = ('Protein', 0)
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = ('Tracer', 1)
for key, value in data_dict.items(): # iterate over the repeats specified in the 'rep' parameter
metadata, data = value.values()
fit_params = data['fit_params']
for ptc in ptc_list: # iterate over each protein-tracer pair
r_data_df, i_data_df = data['r_mean'][ptc], data['i_mean'][ptc] # extract the df with anisotropy and intensity dfs
if var == 'r' or var == 'both':
fig, axs = plt.subplots(figsize=(6.4, 4.8), tight_layout=True) # create a figure with a single axis for anisotropy
FA._plot_ani(r_data_df, fit_params, ptc, t_type, fig, axs, err, 'r', key, unit, exp=path, disp=False, leg=legend, dpi=dpi)
if var == 'i' or var == 'both':
fig, axs = plt.subplots(figsize=(6.4, 4.8), tight_layout=True)
FA._plot_ani(i_data_df, fit_params, ptc, t_type, fig, axs, err, 'i', key, unit, exp=path, disp=False, leg=legend, dpi=dpi)
print('The figures were successfully exported.')
def _plot_kd(data_df, ptc, final_fit, t_type, err, rep, unit, exp, leg, dpi):
"""Plots amount bound against protein or tracer concentration with a fitted curve on a separate figure for a specific protein-tracer pair.
:param data_df: Data frame with mean values of amount of tracer bound and their associated errors
:type data_df: pandas df
:param ptc: Protein-tracer-competitor names for which data will be plotted
:type ptc: tuple of str
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param rep: Tuple of the repeat name and the total number of repeats, used for labelling of the graph
:type rep: tuple
:param unit: Concentration units to be displayed on the plots
:type unit: str
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
drop = data_df[data_df[f'{t_type} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaNs from data fitting
fig, axs = plt.subplots(1, 1, figsize=(6.4, 4.8), tight_layout=True)
# define the x axis data and labels for protein and tracer titration cases
if t_type == 'Protein':
LT, LTe, Kd, Kde = tuple(final_fit.loc[ptc, ['LT','LT error','Kd','Kd error']])
text = '$L_{T}$ = ' + f'{LT:,.2f} \u00B1 {LTe:,.2f}\n' + '$K_{d}$ = ' + f'{Kd:,.2f} \u00B1 {Kde:,.2f}'
xlabel = f'[{ptc[0]}] ({unit})'
params = (LT, Kd)
if t_type == 'Tracer':
PT, PTe, Kd, Kde = tuple(final_fit.loc[ptc, ['PT','PT error','Kd','Kd error']])
text = '$P_{T}$ = ' + f'{PT:,.2f} \u00B1 {PTe:,.2f}\n' + '$K_{d}$ = ' + f'{Kd:,.2f} \u00B1 {Kde:,.2f}'
xlabel = f'[{ptc[1]}] ({unit})'
params = (PT, Kd)
axs.errorbar(drop[f'{t_type} Concentration'], drop['mean'], yerr=drop[err], color='black', fmt='o', capsize=3, marker='s')
axs.set_xscale('log')
axs.set_ylabel(f'[Fluorescent Tracer Bound] ({unit})')
axs.set_xlabel(xlabel)
vir_data = FA._vir_data(drop, t_type, 200)
axs.plot(vir_data, FA._LB(vir_data, *params), color='blue')
if leg == True: # display the figure title, legend and annotation with fitting params
if rep[1] > 1: # do not display info about repeat number if there is only one repeat
axs.set_title(f'Repeat {rep[0][-1]}, Protein: {ptc[0]}, Tracer: {ptc[1]}')
else:
axs.set_title(f'Protein: {ptc[0]}, Tracer: {ptc[1]}')
axs.legend([f'single site fitted curve\n{text}'], fontsize=11, frameon=False)
plt.show()
if exp == True: # save the figure to the same directory as the notebook
fig.savefig(f"Kd_plot_rep_{rep[0][-1]}_{str(ptc[0])}_{str(ptc[1])}.png", dpi=dpi)
if type(exp) == str: # save the figure to user defined directory
fig.savefig(f"{exp}Kd_plot_rep_{rep[0][-1]}_{str(ptc[0])}_{str(ptc[1])}.png", dpi=dpi)
def _overlay_kd_plots(plate_map, data_dict, ptc_list, final_fit, t_type, err, unit, exp, leg, dpi):
"""Creates a figure with overlayed plots for specified protein-tracer pairs and repeats
:param plate_map: Platemap
:type plate_map: pandas df
:param data_dict: Data dictionary containing the specific repeats for which data will be plotted
:type data_dict: dict
:param ptc_list: List of protein-tracer names for which data will be plotted
:type ptc: list of tuples
:param t_type: Type of titration ('Protein' or 'Tracer')
:type t_type: str
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param unit: Concentration units to be displayed on the plots
:type unit: str
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
if len(ptc_list) < 2:
raise DataError('At least two data sets are required for overlayed plot.')
fig, axs = plt.subplots(1, 1, figsize=(6.4, 4.8), tight_layout=True)
text_final = [] # list to store the legend string for each data set
cmaps = ['Blues', 'Greens', 'Oranges', 'Purples', 'Reds', 'Greys', 'YlOrBr', 'YlOrRd',
'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']
iter_cmaps = iter(cmaps)
for key, value in data_dict.items(): # iterate through all repeats of the defined data_dict
metadata, data = value.values()
for ptc in ptc_list: # iterate through the list of protein-tracer names to plot its data on the same figure
data_df = data['amount_bound'][ptc] # extract the correct df with amount bound for a given protein-tracer pair
drop = data_df[ data_df[f'{t_type} Concentration'] != 0].dropna(subset=['mean']) # exclude the protein concentration = 0 point and any NaNs from data fitting
if t_type == 'Protein':
LT, LTe, Kd, Kde = tuple(final_fit.loc[ptc, ['LT','LT error','Kd','Kd error']])
text = '$L_{T}$ = ' + f'{LT:,.2f} \u00B1 {LTe:,.2f}\n' + '$K_{d}$ = ' + f'{Kd:,.2f} \u00B1 {Kde:,.2f}'
params = (LT, Kd)
if t_type == 'Tracer':
PT, PTe, Kd, Kde = tuple(final_fit.loc[ptc, ['PT','PT error','Kd','Kd error']])
text = '$P_{T}$ = ' + f'{PT:,.2f} \u00B1 {PTe:,.2f}\n' + '$K_{d}$ = ' + f'{Kd:,.2f} \u00B1 {Kde:,.2f}'
params = (PT, Kd)
if len(data_dict) > 1: # do not display info about repeat number if there is only one repeat
text_long = f"rep {key[-1]}, {ptc[0]}, {ptc[1]}\n{text}"
else:
text_long = f"{ptc[0]}, {ptc[1]}\n{text}"
text_final.append(text_long)
vir_data = FA._vir_data(drop, t_type, 200)
cmap = plt.cm.get_cmap(next(iter_cmaps)) # take the next color map from the list
axs.errorbar(drop[f'{t_type} Concentration'], drop['mean'], yerr=drop[err], fmt='o', capsize=3, marker='s', color=cmap(0.95))
axs.plot(vir_data, FA._LB(vir_data, *params), color=cmap(0.50))
axs.set_xscale('log')
axs.set_ylabel(f'[Fluorescent Tracer Bound] ({unit})')
axs.set_xlabel(f'[{t_type}] ({unit})')
if leg == True: # display the figure title, legend and annotation with fitting params
axs.set_title(f'Overlayed plot')
lbox = axs.legend(text_final, fontsize=11, frameon=False, loc='upper left', bbox_to_anchor=(1.03, 0.95))
fig.canvas.draw() # draw the canvas so that figure and legend size is defined
# calculate length by which the figure will be widened to accommodate the legend
w = (lbox.get_window_extent().width + (0.06 * axs.get_window_extent().width)) / fig.dpi
fig.set_size_inches(6.4 + w, 4.8) # resize the figure
plt.show()
if exp == True: # save the figure to the same directory as the notebook
fig.savefig(f"Overlayed_Kd_plot.png", dpi=dpi)
if type(exp) == str: # save the figure to user defined directory
fig.savefig(f"{exp}Overlayed_Kd_plot.png",dpi=dpi)
def plot_kd(self, prot=['all'], trac=['all'], rep=['all'], err='std', overlay=False, legend=True, export=False, dpi=250):
"""Plots the concentration of fluorescent tracer bound to target protein against the protein (or tracer) concentration.
:param prot: List of protein names for which the graphs will be created, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs will be created, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs will be created, defaults to ['all'].
:type rep: list of ints
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem'), defaults to 'std'.
:type err: str
:param overlay: Overlays the data on a single figure, defaults to False.
:type overlay: bool
:param legend: Display the figure title and legend, defaults to True.
:type legend: bool
:param export: Save the figures (True) in the same directory as this Notebook or provide a path (str) to specified directory
:type export: bool or 'str'
:param dpi: Resolution of the exported figure in dots per inches, defaults to 250.
:type dpi: int
"""
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, ['all'], rep)
unit = str(self.plate_map['Concentration Units'].dropna().unique()[0]) # concentration units
if len(self.plate_map['Tracer Concentration'].dropna().unique()) == 1: # protein is titrated to a constant amount of tracer
t_type = 'Protein'
if len(self.plate_map['Protein Concentration'].dropna().unique()) == 1: # tracer is titrated to a constant amount of protein
t_type = 'Tracer'
if overlay == False:
for key, value in data_dict.items(): # iterate through all repeats of the defined data_dict
metadata, data = value.values()
rep = (key, len(data_dict))
for ptc in ptc_list: # iterate through the list of protein-tracer names to create a separate figure for each pair
data_df = data['amount_bound'][ptc] # extract the correct df with amount bound for a given protein-tracer pair
FA._plot_kd(data_df, ptc, self.final_fit, t_type, err, rep, unit, export, legend, dpi)
else:
FA._overlay_kd_plots(self.plate_map, data_dict, ptc_list, self.final_fit, t_type, err, unit, export, legend, dpi)
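# Illustrative usage sketch (not part of the original module); assumes an FA instance
# named `fa` whose data has already been processed and fitted. The protein/tracer
# names and the 'figures/' path are hypothetical placeholders:
#
#   fa.plot_kd(prot=['Protein A'], trac=['Tracer 1'], err='sem', overlay=True)
#   fa.plot_kd(export='figures/', legend=False)   # save each figure into 'figures/'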
def _plot_ic50(data_df, params_df, ptc, err, rep, unit, exp, leg, dpi):
"""Plots amount bound against protein or tracer concentration with a fitted curve on a separate figure.
:param data_df: Data frame with mean values of amount of tracer bound and their associated errors
:type data_df: pandas df
:param ptc: Protein and tracer names for which data will be plotted
:type ptc: tuple of str
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem')
:type err: str
:param rep: Tuple of the repeat name and the total number of repeats, used for labelling of the graph
:type rep: tuple
:param unit: Concentration units to be displayed on the plots
:type unit: str
:param leg: Determines whether the legend and box with fitting parameters will be displayed on the figure, default True
:type leg: bool
:param exp: Determines whether the figure will be saved, can be either bool or string with directory path
:type exp: bool or 'str'
:param dpi: Resolution of the figure in points per inch
:type dpi: int
"""
drop = data_df[data_df['Competitor Concentration'] != 0].dropna(subset=['mean']) # exclude the competitor concentration = 0 point and any NaNs from data fitting
fig, axs = plt.subplots(1, 1, figsize=(6.4, 4.8), tight_layout=True)
params = tuple(params_df.loc[ptc, ['min','max','IC50','hill']])
ic50, ic50e, Ki, Kie, Ki2, Ki2e = tuple(params_df.loc[ptc, ['IC50','IC50 error','Ki','Ki error','Ki*','Ki* error']])
text = "$IC_{50}$" + f" = {ic50:,.2f} \u00B1 {ic50e:,.2f}\n" + "$K_i$" + f" = {Ki:,.2f} \u00B1 {Kie:,.2f}\n" + "$K_i*$" + f" = {Ki2:,.2f} \u00B1 {Ki2e:,.2f}"
axs.errorbar(drop['Competitor Concentration'], drop['mean'], yerr=drop[err], color='black', fmt='o', capsize=3, marker='s')
axs.set_xscale('log')
axs.set_ylabel(f'[Fluorescent Tracer Bound] ({unit})')
axs.set_xlabel(f'[{ptc[2]}] ({unit})')
vir_data = FA._vir_data(drop, 'Competitor', 200)
axs.plot(vir_data, FA._EC50_com(vir_data, *params), color='blue')
if leg == True: # display the figure title and legend with fitting params
if rep[1] > 1: # do not display info about repeat number if there is only one repeat
axs.set_title(f'Repeat {rep[0][-1]}, {ptc[0]}, {ptc[1]}, {ptc[2]}')
else:
axs.set_title(f'{ptc[0]}, {ptc[1]}, {ptc[2]}')
axs.legend([f'single site fitted curve\n{text}'], fontsize=11, frameon=False)
plt.show()
if exp == True: # save the figure to the same directory as the notebook
fig.savefig(f"IC50_plot_rep_{rep[0][-1]}_{str(ptc[0])}_{str(ptc[1])}_{str(ptc[2])}.png", dpi=dpi)
if type(exp) == str: # save the figure to user defined directory
fig.savefig(f"{exp}IC50_plot_rep_{rep[0][-1]}_{str(ptc[0])}_{str(ptc[1])}_{str(ptc[2])}.png", dpi=dpi)
def plot_ic50(self, prot=['all'], trac=['all'], rep=['all'], com=['all'], err='std', legend=True, export=False, dpi=250):
"""Plots the concentration of the fluoorescent tracer bound to the target protein against competitor concentration.
Calculates the binding constant (Ki) for each competitor.
:param prot: List of protein names for which the graphs will be created, defaults to ['all'].
:type prot: list of str
:param trac: List of tracer names for which the graphs will be created, defaults to ['all'].
:type trac: list of str
:param rep: List of repeat numbers for which the graphs will be created, defaults to ['all'].
:type rep: list of ints
:param com: List of competitor names for which the graphs are created, defaults to ['all'].
:type com: list or list of str
:param err: Type of error data displayed as error bars, either standard deviation ('std') or standard error ('sem'), defaults to 'std'.
:type err: str
:param legend: Display the figure title and legend, defaults to True.
:type legend: bool
:param export: Save the figures (True) in the same directory as this Notebook or provide a path (str) to specified directory
:type export: bool or 'str'
:param dpi: Resolution of the exported figure in dots per inches, defaults to 250.
:type dpi: int
"""
warnings.filterwarnings("ignore")
data_dict, ptc_list = FA._get_items_to_plot(self.data_dict, self.plate_map, prot, trac, com, rep)
unit = str(self.plate_map['Concentration Units'].dropna().unique()[0])
print("The Ki* is calculated based on the total protein concentration calculated from the measured anisotropy. Below each figure the values of total protein concentration from platemap (LT) and as calculated from measured anisotropy (LT*) are stated.")
for key, value in self.data_dict.items(): # iterate through all repeats in self.data_dict
metadata, data = value.values()
params_df = data['fit_params_com']
rep = (key, len(data_dict))
for ptc in data['amount_bound'].keys(): # iterate through the list of protein-tracer names to create a separate figure for each pair
data_df = data['amount_bound'][ptc] # extract the correct df with amount bound for a given protein-tracer pair
params = FA._calc_Ki(ptc, params_df, self.plate_map, self.final_fit) # calculate Ki, Ki* and LT
params_df.loc[ptc, ['Ki','Ki error','Ki*','Ki* error']] = params[0:4] # instert Ki into the fit_params_com df
if key in data_dict.keys() and ptc in ptc_list: # plot figures only for user specified proteins, tracers and competitors
FA._plot_ic50(data_df, params_df, ptc, err, rep, unit, export, legend, dpi)
print(f'LT = {params[4]:,.1f} {unit}, LT* = {params[5]:,.1f} {unit}')
##### Fitting params set, export and import functions #####
def set_fitparams(self, prot, trac, **kwargs):
"""Allows to set a value of any parameter in the final fit data frame for a specific protein-tracer pair.
:param prot: Protein name.
:type prot: str
:param trac: Tracer name.
:type trac: str
:param **kwargs: Keyword arguments representing the parameter and its value, e.g. lambda=1.5, rmin=0.30
"""
wrong_cols = []
for key, value in kwargs.items(): # iterate over the kwargs dictionary
if key not in self.final_fit.columns:
wrong_cols.append(key)
else:
self.final_fit.loc[(prot, trac), key] = value # overwrite the parameters in fitting params df with all params passed as keyword arguments
if wrong_cols != []:
warnings.warn(f'No such columns in the final_fit data frame:\n{wrong_cols}')
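# Illustrative usage sketch (not part of the original module); assumes an FA instance
# named `fa` and a protein-tracer pair already present in final_fit. The protein/tracer
# names and column values are hypothetical; check final_fit.columns for the real names:
#
#   fa.set_fitparams('Protein A', 'Tracer 1', rmin=0.05, rmax=0.30)
#   fa.set_fitparams('Protein A', 'Tracer 1', **{'lambda': 1.5})  # 'lambda' is a Python keyword, so pass it via **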
def export_params(self, path='', file_type='csv'):
"""Export the final_fit, fitting_params and, in case of competition data, fitting_params_com for each repeat to csv or excel files.
:param path: A path to directory in which the file is saved, defaults to '' (i.e. the same directory as this Jupyter Notebook)
:type path: str
:param file_type: Type of file generated, either 'csv' or 'excel' file, defaults to csv
:type file_type: 'str'
"""
if file_type == 'csv': # export as csv file
self.final_fit.to_csv(path_or_buf=f"{path}final_fit_params.csv")
if file_type == 'excel': # export as excel file
self.final_fit.to_excel(excel_writer=f"{path}all_fit_params.xlsx", sheet_name="final_fit_params")
for key, value in self.data_dict.items(): #iterate over all repeats
metadata, data = value.values()
if file_type == 'csv': # export as csv file
data['fit_params'].to_csv(path_or_buf=f"{path}rep_{key[-1]}_fit_params.csv")
if 'fit_params_com' in data.keys():
data['fit_params_com'].to_csv(path_or_buf=f"{path}rep_{key[-1]}_fit_params_com.csv")
if file_type == 'excel': # export as excel file
with pd.ExcelWriter(f"{path}all_fit_params.xlsx", engine='openpyxl', mode='a') as writer:
data['fit_params'].to_excel(writer, sheet_name=f"{key}_fit_params")
if 'fit_params_com' in data.keys():
data['fit_params_com'].to_excel(writer, sheet_name=f"{key}_fit_params_com")
print(f'The fitting parameters were exported to the {file_type} files.')
def import_params(self, csv_file):
"""Allows to import a csv file with final_fit parameters (i.e. rmin, rmax, lamda, Kd and their errors).
:param csv_file: A csv file path with parameters to be imported
:type csv_file: str
"""
with open(csv_file) as file: # read the csv into pandas df
df = pd.read_csv(file, sep=',', index_col=[0,1], engine='python', encoding='utf-8') # import with multiindex
if list(df[df.columns[0]].unique()) != ['-']: # df contains competitor names
df = df.set_index(df.columns[0], append=True).rename_axis([None,None,None]) # add competitor name column to multiindex
else: # no competitor name, so delete the first column containing only '-'
df.drop(df.columns[0], axis=1, inplace=True)
cols = df.columns.intersection(self.final_fit.columns) # columns common to imported df and final_fit df
for index in list(df.index): # iterate over the indexes of imported df
self.final_fit.loc[index, cols] = df.loc[index, cols].tolist() # overwrite the existing values in the final_fit df with the ones from imported df
col_diff = list(df.columns.difference(self.final_fit.columns)) # list of columns present in imported df but absent from final_fit df
if col_diff != []: # display warning about missing columns in the final_fit
warnings.warn(f"The final_fit data frame does not contain following columns:\n'{col_diff}'")
def export_data(self, path=''):
"""Saves the mean anisotropy, intensity and amount bound data along with their standard deviation
and standard error into excel file.
:param path: Path to the folder in which the excel file is saved.
:type path: str
"""
for key, value in self.data_dict.items():
metadata, data = value.values()
ptc_list = list(data['r_mean'].keys()) # list of all protein-tracer names
for ptc in ptc_list:
# remove redundant columns and rename the remaining ones for anisotropy, intensity and amount bound dfs
r_df = data['r_mean'][ptc].drop(['Protein Name','Tracer Name','Competitor Name'], axis=1)
r_df2 = r_df.rename(columns={'mean': 'anisotropy mean', 'std': 'ani std', 'sem': 'ani sem'}).set_index('Protein Concentration')
i_df = data['i_mean'][ptc].drop(['Protein Name','Tracer Name','Competitor Name'], axis=1)
i_df2 = i_df.rename(columns={'mean': 'intensity mean', 'std': 'int std', 'sem': 'int sem'}).set_index('Protein Concentration')
ab_df = data['amount_bound'][ptc].drop(['Protein Name','Tracer Name','Competitor Name'], axis=1)
ab_df2 = ab_df.rename(columns={'mean': 'amount bound mean', 'std': 'ab std', 'sem': 'ab sem'}).set_index('Protein Concentration')
# join the anisotropy, intensity and amount bound dfs together
m = pd.concat([r_df2, i_df2, ab_df2], axis=1)
if ptc == ptc_list[0]: # for the first iteration create the excel file
m.to_excel(f"{path}Anisotropy Data.xlsx", sheet_name=f'rep_{key[-1]}_{ptc[0][:7]}_{ptc[1][:7]}_{ptc[2][:7]}')
else: # for next iterations append new sheet to the existing excel file
with pd.ExcelWriter(f"{path}Anisotropy Data.xlsx", engine='openpyxl', mode='a') as writer:
m.to_excel(writer, sheet_name=f'rep_{key[-1]}_{ptc[0][:7]}_{ptc[1][:7]}_{ptc[2][:7]}')  # sheet name assumed to mirror the first-iteration call above
# type: ignore
### Standard imports ###
import os
import glob
import logging
import argparse
import itertools
import operator
import timeit
from multiprocessing import Pool
### Non-standard imports ###
import yaml
import numpy as np
import pandas
### Local imports ###
from riptide import TimeSeries, ffa_search, find_peaks
from riptide.pipelines import Candidate
from riptide.clustering import cluster_1d
from riptide.reading import PrestoInf, SigprocHeader
from riptide.pipelines.harmonic_filtering import flag_harmonics
def parse_yaml_config(fname):
with open(fname, 'r') as fobj:
config = yaml.safe_load(fobj)
return config
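# Illustrative sketch of a manager configuration file, inferred from the keys read
# elsewhere in this module; the values and exact schema are assumptions, the real
# schema is defined by the YAML files shipped with the pipeline:
#
#   data_format: presto
#   glob: "/data/dm_trials/*.inf"
#   num_processes: 8
#   outdir: "/data/output"
#   search_configs: ["search_short_periods.yaml"]
#   dm_min: 0.0
#   dm_max: 1000.0
#   dm_step: 0.05
#   dmsinb_max: 40.0
#   fmin: 0.1
#   fmax: 100.0
#   harmonic_filtering: {enabled: true, max_denominator: 16, snr_tol: 1.5, max_distance: 10.0}
#   candidate_filters: {dm_min: 1.0, snr_min: 8.0, max_number: 100}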
def get_logger(name, level=logging.INFO):
logger = logging.getLogger(name)
logger.setLevel(level)
formatter = logging.Formatter(
fmt="%(asctime)s.%(msecs)03d - %(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S"
)
if not logger.handlers:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def get_lower_dm_limit(dm_trials, dm_min=None):
""" 'dm_min' is the minimum DM enforced by the user """
if dm_min is not None:
return dm_min
else:
return min(dm_trials)
def get_galactic_dm_limit(glat_radians, dmsinb_max, eps=1e-6):
return dmsinb_max / (np.sin(abs(glat_radians)) + eps)
def get_upper_dm_limit(dm_trials, glat_radians, dm_max=None, dmsinb_max=None, eps=1e-6):
""" 'dm_max' is the maximum DM enforced by the user, and 'dmsinb_max' the maximum
value of DM x sin |b| allowed. """
result = max(dm_trials)
if dm_max is not None:
result = min(result, dm_max)
if dmsinb_max is not None:
result = min(result, get_galactic_dm_limit(glat_radians, dmsinb_max))
return result
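# Worked example (illustrative): with DM trials spanning 0..1000, a user cap of
# dm_max=500 and dmsinb_max=40 at |b| = 5 deg, the galactic limit is
# 40 / sin(5 deg) ~= 459, so the upper DM limit becomes min(1000, 500, 459) ~= 459.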
def grouper(iterable, n):
""" Iterate through iterable, yielding groups of n elements. The last
group may have less than n elements. """
# we want izip_longest() in python 2.x
# or zip_longest() in python 3.x
if hasattr(itertools, 'zip_longest'):
zipper = itertools.zip_longest
else:
zipper = itertools.izip_longest
args = [iter(iterable)] * n
for group in zipper(*args, fillvalue=None):
filtered_group = [val for val in group if val is not None]
yield filtered_group
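# Usage sketch (illustrative): grouper(range(7), 3) yields [0, 1, 2], [3, 4, 5], [6];
# the final group is shorter because the fill values are filtered out.
#
#   for batch in grouper(range(7), 3):
#       print(batch)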
def sort_dataframe_by_column(df, column):
""" Utility function that handles the change in pandas API made in version
0.17.0, where DataFrame.sort() was replaced by DataFrame.sort_values()."""
if hasattr(df, 'sort_values'):
return df.sort_values(column)
else:
return df.sort(column)
class DetectionCluster(list):
""" Cluster of Detection objects. """
def __init__(self, detections):
super(DetectionCluster, self).__init__(detections)
@property
def top_detection(self):
return max(self, key=operator.attrgetter('snr'))
def to_dict(self):
t = self.top_detection
return {"period": t.period, "ducy": t.ducy, "dm": t.dm, "snr": t.snr, "num_detections": len(self)}
def __str__(self):
name = type(self).__name__
return "{name:s} [size = {size:4d}, P0 = {top.period:.9e}, DM = {top.dm:8.3f}, S/N = {top.snr:6.2f}]".format(
top=self.top_detection,
name=name,
size=len(self)
)
def __repr__(self):
return str(self)
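# Note (illustrative): DetectionCluster.to_dict() summarises a cluster by its brightest
# member, e.g. {"period": 1.25, "ducy": 0.05, "dm": 12.3, "snr": 9.8, "num_detections": 4};
# the numbers here are made up for illustration only.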
class PulsarSearchWorker(object):
""" Function-like object that takes a single TimeSeries as an argument,
and outputs a list of detections. This is to circumvent a limitation of
multiprocessing.Pool.map() which requires the mapped function to:
- take just one argument.
- be pickle-able, and therefore be defined at the top level of a module
"""
def __init__(self, config):
"""
Parameters:
config : dict
Configuration parameters of the parent PulsarSearch object.
"""
self.config = config
def __call__(self, tseries):
ts, plan, pgram = ffa_search(tseries, **self.config['search'])
dets = find_peaks(pgram, **self.config['detect'])
return dets
class PulsarSearch(object):
""" Gets fed time series and accumulates detections at various DM trials
during processing. Once processing is over, build clusters out of the
accumulated detections. The main PipelineManager object is responsible
for removing harmonics and saving candidates.
The idea is to create one PulsarSearch object for different non-overlapping
period search ranges, where we search longer periods with more phase bins.
"""
def __init__(self, manager, config):
"""
Parameters:
manager: PipelineManager
Parent manager.
config : dict
Configuration parameters of the search. It is read from a
YAML config file, and updated by extra parameters from command
line arguments.
"""
self.manager = manager
self.config = config
self.configure_logger()
self._cumulative_walltime = 0.0
self.detections = []
self.clusters = []
@property
def name(self):
return self.config['name']
@property
def num_processes(self):
return self.manager.config['num_processes']
def cumulative_walltime(self):
""" Returns the total amount of time spent in the
process_time_series_batch() method, in seconds. """
return self._cumulative_walltime
def configure_logger(self):
logger_name = '.'.join(['PulsarSearch', self.name])
self.logger = get_logger(logger_name)
def process_time_series_batch(self, batch):
""" Processes several time series in parallel using the muliprocessing
module.
Parameters:
-----------
batch: list
list of TimeSeries to process.
"""
start_time = timeit.default_timer()
pool = Pool(processes=self.num_processes)
# Split the TimeSeries to search between processes
# 'output' is a list of lists of Detections
self.logger.info("Searching batch of {:d} TimeSeries using {:d} worker processes ...".format(len(batch), self.num_processes))
output = pool.map(PulsarSearchWorker(self.config), batch)
pool.close()
pool.join()
# Accumulate the new Detections
new_detections = [det for sublist in output for det in sublist]
self.logger.info("Search complete. New detections: {:d}".format(len(new_detections)))
self.detections = self.detections + new_detections
self.logger.info("Total detections stored: {:d}".format(len(self.detections)))
end_time = timeit.default_timer()
self._cumulative_walltime += (end_time - start_time)
self.logger.info("Total processing time: {:.2f} seconds".format(self.cumulative_walltime()))
def cluster_detections(self):
self.logger.info("Clustering Detections ...")
if not self.detections:
self.logger.info("No Detections in store. Nothing to be done.")
return
periods = np.asarray([det.period for det in self.detections])
tobs = np.median([det.metadata['tobs'] for det in self.detections])
dbi = tobs / periods
clrad = self.config['detect']['peak_clustering_radius']
cluster_indices = cluster_1d(dbi, clrad)
self.clusters = [
DetectionCluster([self.detections[ii] for ii in indices])
for indices in cluster_indices
]
self.logger.info("Clustering complete. Total Clusters: {0:d}".format(len(self.clusters)))
class PipelineManager(object):
""" Responsible for the outermost DM loop and top-level pulsar search
management. """
DETECTIONS_FILE_NAME = "detections.csv"
CLUSTERS_FILE_NAME = "clusters.csv"
SUMMARY_FILE_NAME = "summary.csv"
CANDIDATE_NAME_PREFIX = "riptide_cand"
def __init__(self, config_path, override_keys={}):
"""
Parameters:
-----------
config_path: str
Path to the YAML config file of the PipelineManager
override_keys: dict
Updated values for some keys of the YAML config file,
specified through the command line arguments of this script.
"""
self.logger = None
self.configure_logger()
self.config_path = os.path.realpath(config_path)
self.config = parse_yaml_config(self.config_path)
self.config.update(override_keys)
self.logger.info("Loaded PipelineManager configuration file: {:s}".format(self.config_path))
self.detections = []
self.clusters = []
self.candidates = []
self.configure_loaders()
self.configure_searches()
def configure_logger(self):
logger_name = 'PipelineManager'
self.logger = get_logger(logger_name)
def configure_searches(self):
self.searches = []
config_dir, config_name = os.path.split(self.config_path)
for search_config_fname in self.config['search_configs']:
search_config_path = os.path.join(config_dir, search_config_fname)
search = PulsarSearch(self, parse_yaml_config(search_config_path))
self.searches.append(search)
self.logger.info("Configured PulsarSearch '{:s}'".format(search.name))
self.logger.info("Configured a total of {:d} searches.".format(len(self.searches)))
def configure_loaders(self):
fmt = self.config['data_format'].strip().lower()
if fmt == 'presto':
self.loader = TimeSeries.from_presto_inf
self.dm_getter = lambda fname: PrestoInf(fname)['dm']
elif fmt == 'sigproc':
self.loader = TimeSeries.from_sigproc
self.dm_getter = lambda fname: SigprocHeader(fname)['refdm']
else:
raise ValueError("Invalid data format '{:s}'".format(fmt))
self.logger.info("Specified file format: {:s}".format(fmt))
def select_dm_trials(self):
""" Build a list of files to process """
glob_pattern = self.config['glob']
filenames = sorted(glob.glob(glob_pattern))
self.logger.info("Found a total of {:d} file names corresponding to specified pattern \"{:s}\"".format(len(filenames), glob_pattern))
self.logger.info("Fetching DM trial values from headers. This may take a while ...")
dm_trials = {
self.dm_getter(fname): fname
for fname in filenames
}
self.logger.info("DM trial values have been read.")
# Helper iterator, used to select a sequence of DM trials according to
# config parameters
def iter_steps(sequence, vmin, vmax, step):
# Ignore steps outside of bounds
array = np.asarray(sorted(list(sequence)))
mask = (array >= vmin) & (array <= vmax)
array = array[mask]
# Yield values separated by at least 'step'
last = None
rtol = 1e-7 # Deal with float rounding errors
for value in array:
if last is None or value - last >= step * (1-rtol):
last = value
yield value
# Set max DM trial as a function of both the hard maximum limit and
# dmsinb_max
dm_min = self.config['dm_min']
dm_max = self.config['dm_max']
dm_step = self.config['dm_step']
dmsinb_max = self.config['dmsinb_max']
if filenames:
# Get coordinates of the first file in the list
tseries = self.loader(filenames[0])
skycoord = tseries.metadata['skycoord']
glat_radians = skycoord.galactic.b.rad
self.logger.info("Read galactic latitude from \"{:s}\" b = {:.3f} deg".format(filenames[0], skycoord.galactic.b.deg))
if dmsinb_max is not None:
msg = "Requested maximum value of DM x sin |b| ({:.2f}) corresponds to DM = {:.2f}".format(
dmsinb_max,
get_galactic_dm_limit(glat_radians, dmsinb_max)
)
self.logger.info(msg)
dm_min = get_lower_dm_limit(dm_trials.keys(), dm_min=dm_min)
dm_max = get_upper_dm_limit(dm_trials.keys(), glat_radians, dm_max=dm_max, dmsinb_max=dmsinb_max)
self.logger.info("Selecting DM trials in range [{:.3f}, {:.3f}] with a minimum step of {:.3f}".format(dm_min, dm_max, dm_step))
# NOTE: this is an iterator
dm_trial_values = iter_steps(dm_trials.keys(), dm_min, dm_max, dm_step)
self.dm_trial_paths = [dm_trials[value] for value in dm_trial_values]
self.logger.info("Selected {:d} DM trials to process".format(len(self.dm_trial_paths)))
def iter_batches(self):
""" Iterate through input time series in batches. Yields a list of
num_processes TimeSeries at each iteration. """
num_processes = self.config['num_processes']
paths = self.dm_trial_paths
num_dm_trials = len(paths)
self.logger.info("Preparing to iterate through DM trials. Number of input files: {:d}". format(num_dm_trials))
for batch in grouper(paths, num_processes):
tsbatch = list(map(self.loader, batch))
yield tsbatch
def fetch_detections(self):
""" Place all Detectionr objects from all the searches into a
single list. Give each Detection a new attribute tracking
which PulsarSearch it belongs to."""
self.detections = []
for search in self.searches:
for det in search.detections:
det.search = search
self.detections.append(det)
self.logger.info("Fetched a total of {:d} Detections".format(len(self.detections)))
def fetch_clusters(self):
""" Place all DetectionCluster objects from all the searches into a
single list. Give each DetectionCluster a new attribute tracking
which PulsarSearch it belongs to."""
self.clusters = []
for search in self.searches:
for cl in search.clusters:
cl.search = search
self.clusters.append(cl)
self.logger.info("Fetched a total of {:d} DetectionClusters".format(len(self.clusters)))
def remove_harmonics(self):
enabled = self.config['harmonic_filtering']['enabled']
if not enabled:
self.logger.info("Harmonic filtering is disabled.")
return
if not self.detections:
return
self.logger.info("Removing harmonics ...")
fmin = self.config['fmin']
fmax = self.config['fmax']
tobs = np.median([det.metadata['tobs'] for det in self.detections])
max_denominator = self.config['harmonic_filtering']['max_denominator']
snr_tol = self.config['harmonic_filtering']['snr_tol']
max_distance = self.config['harmonic_filtering']['max_distance']
self.clusters = sorted(self.clusters, key=lambda cl: cl.top_detection.snr, reverse=True)
cparams = list(map(DetectionCluster.to_dict, self.clusters))
cparams = flag_harmonics(
cparams,
fmin=fmin, fmax=fmax, tobs=tobs, max_denom=max_denominator,
max_distance=max_distance, snr_tol=snr_tol)
fundamentals = []
for cl, par in zip(self.clusters, cparams):
if par["is_harmonic"]:
fund = self.clusters[par["fundamental_index"]]
frac = par["fraction"]
msg = "{!s} is a harmonic of {!s} with period ratio {!s}".format(cl, fund, frac)
self.logger.debug(msg)
else:
fundamentals.append(cl)
num_harmonics = len(self.clusters) - len(fundamentals)
self.logger.info("Flagged {:d} harmonics".format(num_harmonics))
self.clusters = fundamentals
self.logger.info("Retained {:d} final Candidates".format(len(self.clusters)))
def _apply_candidate_filter(self, filter_name, func):
num_clusters = len(self.clusters)
valid_clusters = list(filter(func, self.clusters))
num_invalid = num_clusters - len(valid_clusters)
self.logger.info("Applied candidate filter \"{:s}\" on {:d} DetectionClusters: {:d} were removed".format(filter_name, num_clusters, num_invalid))
self.clusters = valid_clusters
def apply_candidate_filters(self):
""" Remove DetectionClusters that do not pass the filters specified into
the PipelineManager config file. """
params = self.config['candidate_filters']
dm_min = params['dm_min']
snr_min = params['snr_min']
max_number = params['max_number']
# NOTE: Don't forget to sort by decreasing S/N before applying the filters
self.clusters = sorted(self.clusters, key=lambda cl: cl.top_detection.snr, reverse=True)
if dm_min:
self._apply_candidate_filter(
"DM >= {:.2f}".format(dm_min),
lambda cl: cl.top_detection.dm >= dm_min)
if snr_min:
self._apply_candidate_filter(
"S/N >= {:.2f}".format(snr_min),
lambda cl: cl.top_detection.snr >= snr_min)
if max_number:
self.logger.info("Keeping only the top {:d} brightest candidates".format(max_number))
self.clusters = self.clusters[:max_number]
def build_candidates(self):
""" Turn remaining clusters (after applying filters) into candidates.
"""
self.logger.info("Building Candidates ...")
self.candidates = []
for cluster in self.clusters:
search = cluster.search
# Get the original parameters of the PulsarSearch that found
# this cluster
rmed_width = search.config['search']['rmed_width']
rmed_minpts = search.config['search']['rmed_minpts']
nbins = search.config['candidates']['nbins']
nsubs = search.config['candidates']['nsubs']
# Re-load TimeSeries associated to the top detection, and run
# the same pre-processing again.
fname = cluster.top_detection.metadata['fname']
try:
tseries = self.loader(fname)
tseries.deredden(rmed_width, minpts=rmed_minpts, inplace=True)
tseries.normalise(inplace=True)
candidate = Candidate.from_pipeline_output(cluster, tseries, nbins=nbins, nsubs=nsubs, logger=self.logger)
self.candidates.append(candidate)
except Exception as error:
self.logger.error("ERROR: Failed to build candidate from {!s}. Reason: {!s}".format(cluster, error))
self.candidates = sorted(self.candidates, key=lambda cd: cd.metadata['best_snr'], reverse=True)
self.logger.info("Done building candidates.")
def save_detections(self):
""" Save detection parameters to pandas.DataFrame """
if not self.detections:
return
outdir = self.config['outdir']
fname = os.path.join(outdir, self.DETECTIONS_FILE_NAME)
self.logger.info("Saving pandas.DataFrame with parameters of all {:d} Detections to file {:s}".format(len(self.detections), fname))
columns = ['search_name', 'period', 'dm', 'width', 'ducy', 'snr']
data = []
for det in self.detections:
entry = (det.search.config['name'], det.period, det.dm, det.width, det.ducy, det.snr)
data.append(entry)
data = pandas.DataFrame(data, columns=columns)
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
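# Reference arithmetic for the fee assertions below (illustrative): selling
# 3333.3333 shares at price 10 gives a gross value of 33333.333; with
# sell_rate = 0.001 the fee is 33.333 and the cash received is
# 33333.333 - 33.333 = 33299.999667, matching the values checked in test_rate_fee.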
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Cost')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
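# the checks above suggest the inference rules used when par_types is omitted:
# a two-member pair containing a float yields a 'conti' axis, an all-integer
# pair a 'discr' axis, and a tuple with more than two members an 'enum' axis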
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
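# minimal usage sketch of the Space API as exercised by this test (not an
# authoritative reference; only the attributes asserted above are listed):
#     s = Space(pars=[(0., 10), (0, 10)])    # axis types inferred: conti, discr
#     s.dim, s.size, s.shape, s.count        # dimensionality and geometry
#     s.boes                                 # boundary tuple of each axis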
def test_extract(self):
"""
test extracting points from Space objects in 'interval' and 'rand' modes
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
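# with 'interval' extraction the argument is the step size: a step of 3 over
# [0, 10] gives 4 grid values per axis (0, 3, 6, 9), hence 4 x 4 = 16 points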
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, np.int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, np.int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
# materialise the zip so the assertion below does not consume an exhausted iterator
expected_int4 = [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]
it = list(zip(extracted_int_list4, expected_int4))
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == exp_item for ext_item, exp_item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
# the original assertion re-used the exhausted iterator 'it' from the previous
# block and therefore checked nothing; verify each extracted pair directly
self.assertIn(item, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)])
self.assertIn(item2, [1, 2, 3, 4])
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build subspaces around ten of them
# check that each subspace is a Space contained in s, extract points from it
# with an interval of 32, and verify that the extraction count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
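# observed behaviour: extracting with an interval of 32 from each subspace
# yields between 512 and 4096 points, presumably depending on how the
# subspace is clipped at the parent space boundaries (inferred from the
# assertions above, not from documentation)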
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
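# summary of the Axis.extract() modes exercised above (inferred from the
# assertions): 'int' steps through 'conti'/'discr' axes with the given
# interval and returns every member of an 'enum' axis, while 'rand' draws the
# requested number of random samples from within the axis bounds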
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
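# from_point() appears to build the subspace as [p - d, p + d] on each axis,
# clipped to the parent space: around (3, 3) with distance 2 both axes become
# (1, 5), and the parent axis types ('conti', 'discr') are preserved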
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
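# inferred ResultPool behaviour: in_pool() appends an item/perf pair and cut()
# trims the pool back to its capacity (5 here), keeping the largest perfs by
# default and the smallest when keep_largest=False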
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
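# input_to_list() appears to repeat a scalar input to the requested length and
# to pad a list input with the padder (None by default); a requested length
# shorter than the input leaves the list unchanged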
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple radii (one per dimension):')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
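# space_around_centre() narrows the numeric axes to centre +/- radius, clipped
# to the space bounds; enum axes are narrowed around the centre only when
# ignore_enums=False, otherwise the full enum axis is kept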
def test_get_stock_pool(self):
print(f'start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list date after "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated reference results below were prepared in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a 500-point test series to exercise the evaluation process on data longer than 250 points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
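# eval_fv() evidently reports the final entry of the 'value' column (compare
# the expected numbers with the last elements of the series built in setUp);
# an empty DataFrame yields -np.inf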
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how a divide-by-zero (peak value at or below zero) affects the drawdown ratio
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
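# reading of the eval_max_drawdown() return value, inferred from the checks
# above: (max drawdown ratio, peak index, trough index, recovery index), with
# NaN as the recovery index when the series never regains its previous peak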
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test the volatility calculation on the long data series
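# the first 250 entries of the reference series are NaN, consistent with a
# 250-period rolling window over the 500-point test data (an observation
# about the reference data, not a statement about the implementation)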
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test the Sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))

    def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # test beta calculation on long data
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))

    def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on long data
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))

    def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass

    def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)

    def test_evaluate(self):
pass


class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
        # carefully designed simulated share names, trading dates and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
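        # self.prices: 100 trading days x 7 shares of simulated close prices, aligned with self.dates and self.shares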
        # carefully designed simulated PT (position target) signals:
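        # each row gives the target position weight of each of the 7 shares on the corresponding trading day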
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
        # carefully designed simulated PS proportional trading signals, closely resembling the simulated PT signals
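        # negative values appear to denote selling that proportion of the current holding (-1.000 presumably clears the position)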
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
        # carefully designed simulated VS share-quantity trading signals, similar to the simulated PS signals
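        # values here appear to be absolute numbers of shares to buy (positive) or sell (negative)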
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
        # carefully designed simulated multi-price trading signals: 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
        # the trading prices used for the operations include the open, high and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
        # the trading signals come in three groups, applied to the open, high and close prices respectively
        # the key point here is the handling of the stock delivery period: when it is non-zero, delivery takes place in units of trading days
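        # (e.g. with a delivery period of 1, assets traded on day T are presumably only available for the opposite trade from day T+1 on)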
self.multi_signals = []
        # the first group of multi_signals applies to the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group of signals applies to the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group of signals applies to the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the backtest also needs three groups of prices: open, high and close
self.multi_histories = []
        # the first group of price histories holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # set up backtest parameters
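        # self.rate models cost-free trading, while self.rate2 adds minimum transaction fees (buy_min=10, sell_min=5)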
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
        # simulated backtest results for the PT signals
        # PT signals, sell before buy, delivery period = 0
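        # each result row appears to hold the 7 per-share holdings followed by what look like cash, fee and total-value columns (assumed layout)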
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signals, buy before sell, delivery period = 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
# PT signal, sell first then buy, settlement period: 2 days (stock), 0 days (cash), so the cash from the sale can be used for further purchases
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signal, buy first then sell, settlement period: 2 days (stock), 1 day (cash)
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
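# Hedged note (not part of the original fixtures): in the test methods these
# expected-result matrices are typically compared against computed backtest
# output with a tolerance-based NumPy check, for example
#     self.assertTrue(np.allclose(computed_result, self.pt_res_bs21, rtol=1e-3))
# where `computed_result` is a hypothetical array of the same shape
# (one row per simulated day, ten result columns).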
# Simulated backtest results for PS signals
# PS signal, sell first then buy, settlement period: 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000,
33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signal, buy first then sell, settlement period: 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signal, sell first then buy, settlement period: 2 days (stock), 0 days (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy first then sell, settlement period: 2 days (stock), 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated VS-signal backtest results
# VS signals, sell first then buy, settlement period: 0 days
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signals, buy first then sell, settlement period: 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signals, sell first then buy, settlement period: 2 days (stocks), 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# VS signals, buy first then sell, settlement period: 2 days (stocks), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# Multi-signal results, sell first then buy, purchases funded by the cash from sales, settlement period: 2 days (stocks), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
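# NOTE (added for clarity; layout inferred from the assertions below): each row of the
# pt_res_* / ps_res_* / vs_res_* arrays appears to hold the holdings of the 7 traded shares
# (columns 0-6), followed by cash (column 7), what looks like a fee column (column 8) and
# the total account value (column 9); the tests read holdings via [0:7] and cash via [7].
# multi_res appears to use a shorter layout of 3 share columns followed by cash, fee and
# total value.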
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
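# The five return values are combined below as cash and amount deltas: c_g and c_s appear
# to be the cash gained from sales and the (negative) cash spent on purchases, so that
# cash = own_cash + c_g + c_s; a_p and a_s appear to be the per-share amounts purchased and
# sold, and fee the transaction cost. This naming is inferred from the variable names and
# from how the results are accumulated in these tests.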
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
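# Expected day-1 outcome, restated from the assertions below: starting from 10,000 in cash,
# the step leaves 7,500 in cash and ~555.56 units of the fifth share (index 4), i.e. about
# 2,500 of the initial cash is used for the purchase (fees included).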
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
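# Day 61: note the extra 10,000 added to own_cash / available_cash in the next call,
# apparently modelling an additional cash deposit on top of the day-60 balance before the
# step is executed (the same 10,000 is added back when checking the resulting cash).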
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[96][7] + c_g + c_s
amounts = self.pt_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[96][7] + c_g + c_s
amounts = self.pt_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[96][7] + c_g + c_s
amounts = self.ps_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                                     own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                                     own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[96][7] + c_g + c_s
amounts = self.ps_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[96][7] + c_g + c_s
amounts = self.vs_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[96][7] + c_g + c_s
amounts = self.vs_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 days \n'
              'maximize cash usage = True \n'
              '(applicable because cash delivery period == 0)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of PS proportion signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 days \n'
              'maximize cash usage = True \n'
              '(applicable because cash delivery period == 0)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of VS volume signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 days \n'
              'maximize cash usage = True \n'
              '(applicable because cash delivery period == 0)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
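        # h rows follow data_types ('close, open, high, low'); average them per bar, then smooth with an n-day SMA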
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
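        # argsort is ascending: dropping the smallest ratio keeps the two shares with the largest change ratio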
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
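        # change of the first data type (close) at the last bar of the window, relative to the window mean price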
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
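        # candlestick body-to-range ratio abs((C - O) / (H - L)); values below r mark a doji-style cross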
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
    # The concrete implementation of the strategy goes in its _realize() method.
    # This method always takes two arguments: hist_price, the historical data of the given portfolio,
    # and params, the concrete strategy parameters.
    def _realize(self, hist_price, params):
        """Concrete implementation of the strategy:
        f: fast moving-average window; s: slow moving-average window; m: boundary width around the slow MA"""
f, s, m = params
        # Temporary workaround: slice the incoming data here in the strategy implementation; later the
        # slicing should be done outside the strategy so that the data passed in already matches data_types.
h = hist_price.T
        # compute the current values of the slow and fast moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
        # compute the stop boundary around the slow MA; while the fast MA stays inside it, close positions and issue no signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
        # produce long/short/empty marks depending on where the fast MA sits relative to the boundary
        if f_ma > s_ma_u:  # fast MA above the slow-MA stop range: hold a long position
            return 1
        elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the stop boundary: close positions
            return 0
        else:  # f_ma < s_ma_l, fast MA below the slow-MA stop range: hold a short position
            return -1
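# A minimal usage sketch for the custom strategy above (an illustration only, not exercised by the tests
# below; passing a strategy *instance* to the Operator constructor is an assumption, not a documented API):
#     stg = MyStg()
#     op = qt.Operator(strategies=[stg])           # hypothetical: registering the instance directly
#     op.set_parameter(0, pars=(20, 100, 0.01))    # the same parameter triple as the defaults above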
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
        # build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values
        # on some days for some of the shares in the pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
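# Note on blender expressions (a summary of the behaviour exercised below, not a formal
# spec): operands in a blender string appear to reference strategies by their position,
# blenders are stored per back-test price type, and internally they are kept in reverse
# Polish notation, e.g. '1+2' is parsed into ['+', '2', '1'].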
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_signal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags, all opt_tags are 0:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags, the opt_tags list reflects the new tags:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: modify set_parameter() so that the usage below becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test that a new, distinct instance is created when the same strategy is added again')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding faulty data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_operator_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_operator_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test clearing all strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
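# Note: when no blender has been set explicitly, prepare_data appears to fall back to a
# simple additive blender over all strategies of the same price type ('0+1+2' here),
# which is what the RPN list ['+', '2', '+', '1', '0'] asserted above encodes.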
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# create three kinds of trading strategies using the custom strategy classes defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT type signal generation:
# create an Operator object whose signal type is PT (proportion target signal)
# the Operator contains two strategies, an LS-Strategy and a Sel-Strategy, representing
# a timing strategy and a stock-selection strategy respectively
# each strategy generates its own PT signal, and the two signals are blended into one output
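# In PT mode each signal value is read as a target position proportion per share
# (e.g. 0.5 means holding half of the available position), so the blended output checked
# below is a matrix of target proportions rather than discrete buy/sell orders
# (an interpretation inferred from the target arrays in this test).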
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created; it is a 3-share/45-day/1-htype array. To make the comparison easier,\n'
f'it is squeezed into a 2-d array and compared share by share:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# test generation of two separate sets of PT signals:
# add two more strategies to the Operator; they are of the same types as the existing ones
# but use different parameters, and their back-test price type is set to 'open'
# the Operator should then generate two sets of trading signals, one for the 'close' price
# type and one for 'open'
# two new strategy objects must be created here, otherwise duplicated object references
# would appear in the Operator's strategies list and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# more test cases to be added
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if an invalid parameter is given')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
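# A worked example of how the RPN list is evaluated (a sketch of the assumed stack-based
# evaluation, not necessarily the library's internal implementation):
#     blender ['|', '2', '&', '1', '0'] is consumed from the tail:
#     push signal[0], push signal[1], apply '&', push signal[2], apply '|'
#     e.g. for signals [1, 0, 1]: (1 & 0) = 0, then (0 | 1) = 1, matching the assertion above.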
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
# pars: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions like -(1+2) cannot be parsed yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
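# How set_opt_par appears to distribute the flat tuple (inferred from the assertions in
# this test, not a documented spec):
#     - strategies with opt_tag == 0 are skipped and keep their current pars
#     - strategies with opt_tag == 1 consume as many values as their own parameter count,
#       in strategy order, e.g. (5, 12, 9, 8, 26, 9, 'buy') -> dma gets (5, 12, 9) and
#       crossline gets (8, 26, 9, 'buy'); extra trailing values appear to be ignored
#     - strategies with opt_tag == 2 (enumerated pars) consume one element that is itself
#       the complete parameter tuple, e.g. (5, 12, 9, (8, 26, 9, 'buy'))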
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may
# have unintended consequences
# TODO: the nan-value issue needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportional weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor in even weight, with threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if empty value (np.array([])) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
        # Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
        # values is not np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
        # label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
        print('==========================\noutput all history data of the close htype\n')
        self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
        print(f'==========================\noutput all history data of the close and open htypes\n')
        self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
        print(f'==========================\noutput history data of all htypes for the first share\n')
        self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
        print('==========================\noutput all history data of all shares for htypes 0, 1 and 2\n')
        self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
        print('==========================\noutput all history data of the close and high htypes\n')
        self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
        print('==========================\noutput all history data of htypes 0 and 1\n')
        self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
        print('==========================\noutput all history data of the close and high htypes\n')
        self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
        print('==========================\noutput all history data of the three htypes from close to high\n')
        self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
        print('==========================\noutput all history data of shares 0, 1 and 3\n')
        self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
        print('==========================\noutput all history data of shares 000100 and 000102\n')
        self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
        print('==========================\noutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
        self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
        print('==========================\noutput all history data of shares 000100 and 000102\n')
        self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
        print('==========================\noutput history data of days 0-7 for all shares\n')
        self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
        print('==========================\noutput history data of days 0-7 for share 000100\n')
        self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
        print('==========================\nstart testing multi-axis slicing of the HistoryPanel object')
        print('==========================\noutput the close and open history data of shares 000100 and 000102\n',
              hp['close,open', ['000100', '000102']])
        print('==========================\noutput the close and open history data of shares 000100 and 000102\n',
              hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing of "open, close" does NOT equal to "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
        # TODO: this needs strengthening: confirm with concrete examples that hp_join
        # TODO: produces correct results, especially for different shares, htypes and
        # TODO: hdates, and for different orderings of them
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
                                     pd.Timestamp('1970-01-01 00:00:00.000000008'),
                                     pd.Timestamp('1970-01-01 00:00:00.000000009')])
import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
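# Illustrative note (added for clarity; not in the original test module): the
# strategy above draws tuples of the form (seq, pattern, offset), for example
#   (["aab", None, "ba"], "aab", 1)
# where `pattern` has been spliced into some entries of `seq` and `offset` says
# how many leading rows a test should drop to exercise the offset-handling code.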
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array)
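# For reference (added comment; not in the original tests): this helper wraps the
# same input either as fr.FletcherChunkedArray or fr.FletcherContinuousArray, so
# each test can run against both storage variants, e.g.
#   _fr_series_from_data(["a", None], "chunked")     # Series backed by a chunked array
#   _fr_series_from_data(["a", None], "continuous")  # Series backed by a continuous array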
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
"""Check a .str. function that returns a series with type t."""
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_startswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("startswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
)
@pytest.mark.parametrize(
"data, pat, expected",
[
([], "", []),
(["a", "b"], "", [True, True]),
(["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
if str_accessor == "str":
pytest.skip(
"return types not stable yet, might sometimes return null instead of bool"
)
return
fr_series = _fr_series_from_data(data, fletcher_variant)
fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())
# Run over slices to check offset handling code
for i in range(len(data)):
ser = fr_series.tail(len(data) - i)
expected = fr_expected.tail(len(data) - i)
result = getattr(ser, str_accessor).contains(pat, regex=False)
tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
data, pat, test_offset = data_tuple
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
case=True,
regex=False,
)
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=False,
case=False,
)
regex_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", None], "a"),
(["aa", "ab", "ba", None], "a$"),
(["aa", "ab", "ba", None], "^a"),
(["Aa", "ab", "ba", None], "A"),
(["aa", "AB", "ba", None], "A$"),
(["aa", "AB", "ba", None], "^A"),
],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
)
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=True,
case=False,
)
@settings(deadline=None)
@given(
data_tuple=string_patterns_st(),
n=st.integers(min_value=0, max_value=10),
repl=st.sampled_from(["len4", "", "z"]),
)
@example(
data_tuple=(["aababaa"], "aabaa", 0),
repl="len4",
n=1,
fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
data_tuple, repl, n, str_accessor, fletcher_variant
):
data, pat, test_offset = data_tuple
_check_str_to_str(
"replace",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
repl=repl,
n=n,
case=True,
regex=False,
)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
"""Check a .str. function that returns a series with type t."""
data, pat, test_offset = data_tuple
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, "count")(pat=pat)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
kwargs = {}
if str_accessor.startswith("fr_"):
kwargs["regex"] = False
result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)
_check_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_zfill(data, str_accessor, fletcher_variant):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
max_str_len = ser_pd.map(_optional_len).max()
if pd.isna(max_str_len):
max_str_len = 0
arrow_data = pa.array(data, type=pa.string())
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
    ser_fr = pd.Series(fr_array)
import os
import glob
import argparse
import pandas as pd
import xml.etree.ElementTree as ET
def process(path, prefix):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (prefix + root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
return xml_list
def xml_to_csv(path, subfolders, folder):
xml_list = []
if subfolders is None or len(subfolders) == 0:
xml_list += process(path + '/' + folder, '')
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df
import logging
import time
from functools import reduce
from typing import List, Iterator, Callable, Any
import pandas
log = logging.getLogger(__name__)
def flatten(l: Iterator[Any]) -> Iterator[Any]:
"""
Thanks to this StackOverflow answer: https://stackoverflow.com/a/10824420
"""
for i in l:
if isinstance(i, (list, tuple)):
for j in flatten(i):
yield j
else:
yield i
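# Illustrative example (added; not in the original module) of how `flatten`
# behaves on arbitrarily nested lists/tuples:
#   >>> list(flatten([1, [2, (3, 4)], 5]))
#   [1, 2, 3, 4, 5]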
def upsert(
update: pandas.DataFrame, using: pandas.DataFrame, cols: List[str]
) -> pandas.DataFrame:
indices = map(lambda x: ~update[x].isin(using[x]), cols)
indices = reduce((lambda x, y: x | y), indices)
    updated = pandas.concat([update[indices], using], ignore_index=True)
    return updated
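# Hedged usage sketch (added for illustration; the example data below is made up
# and not part of the original module). `upsert` keeps the rows of `update`
# whose key columns are not already present in `using`, then appends `using`.
if __name__ == "__main__":
    old = pandas.DataFrame({"id": [1, 2], "value": ["a", "b"]})
    new = pandas.DataFrame({"id": [2, 3], "value": ["B", "c"]})
    # id 2 already exists in `old`, so only the id 3 row of `new` survives,
    # followed by the two rows of `old`.
    print(upsert(update=new, using=old, cols=["id"]))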
from myutils.utils import getConnection, cronlog
import pandas as pd
import numpy as np
import datetime
import requests
class TestRequest:
def __init__(self, url, method='GET', META=None, postdata=None):
        self.url = url
        self.method = method
u = url.split('?')
self.path_info = u[0]
self.META = META or {}
self.GET = {}
if len(u)>1:
for x in u[1].split('&'):
y = x.split('=')
if len(y)==1:
self.GET[x] = ''
else:
self.GET[y[0]] = y[1]
self.PUT = postdata
    def get_full_path(self):
        return self.url
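# Illustrative sketch (added; not in the original script): TestRequest mimics a
# Django-style request object so view code can be exercised outside the web server.
# For example, assuming the class above:
#   req = TestRequest('demand/?start=2020-07-01&format=json')
#   req.path_info  -> 'demand/'
#   req.GET        -> {'start': '2020-07-01', 'format': 'json'}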
conn, cur = getConnection()
if False:
s = """
DROP TABLE IF EXISTS price_function;
CREATE TABLE price_function (
id smallserial PRIMARY KEY
, date DATE NOT NULL
, slope FLOAT(8) NOT NULL
, intercept FLOAT(8) NOT NULL
, r FLOAT(8) NOT NULL
, created_on TIMESTAMP NOT NULL
);
"""
cur.execute(s)
conn.commit()
if False:
s = """
INSERT INTO price_function (date, slope, intercept, r, created_on)
VALUES
('2020-07-05', 3, 2.8, 0.9, CURRENT_TIMESTAMP),
('2020-07-04', 2., 2.9, 0.7, CURRENT_TIMESTAMP);
"""
cur.execute(s)
conn.commit()
s = 'select * from price_function;'
cur.execute(s)
list_tables = cur.fetchall()
print(list_tables)
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS price_forecast;
CREATE TABLE price_forecast (
id serial PRIMARY KEY
, datetime TIMESTAMP NOT NULL
, demand Float(8) NOT NULL
, solar Float(8) NOT NULL
, wind Float(8) NOT NULL
, price Float(4) NOT NULL
, created_on TIMESTAMP NOT NULL
);
"""
cur.execute(s)
conn.commit()
if False:
s = """
DROP TABLE IF EXISTS testing;
CREATE TABLE testing (
id serial PRIMARY KEY
, created_on TIMESTAMP NOT NULL
); """
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_periods;
CREATE TABLE sm_periods (
period_id serial PRIMARY KEY
, period Char(16) not null
, local_date Date not null
, local_time char(5) not null
, timezone_adj smallint not null
);
"""
cur.execute(s)
conn.commit()
df_idx = pd.date_range(datetime.datetime(2019,1,1), datetime.datetime(2020,10,1), freq='30min')
df_idx_local = df_idx.tz_localize('UTC').tz_convert('Europe/London')
df = pd.DataFrame(index=df_idx)
df['period'] = df_idx.strftime('%Y-%m-%d %H:%M')
df['local_date'] = df_idx_local.strftime('%Y-%m-%d')
df['local_time'] = df_idx_local.strftime('%H:%M')
df['timezone_adj'] = df_idx_local.strftime('%z').str[0:3].astype(int)
df.reset_index(inplace=True)
start = """
INSERT INTO sm_periods (period_id, period, local_date, local_time, timezone_adj)
VALUES
"""
s=""
for i, j in df.iterrows():
s+= "({},'{}', '{}', '{}', {}),".format(i, j['period'], j['local_date'],j['local_time'], j['timezone_adj'])
if (i+1)%1000==0:
print('done: {}'.format(i+1))
cur.execute(start + s[:-1] + ';')
conn.commit()
s=""
print('done: {}'.format(i+1))
cur.execute(start + s[:-1] + ';')
conn.commit()
s=""
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_accounts;
CREATE TABLE sm_accounts (
account_id serial PRIMARY KEY
, type_id smallint not null
, first_period varChar(16) not null
, last_period varChar(16) not null
, last_updated TIMESTAMP not null
, hash varChar(64) not null
, region varChar(1)
, source_id smallint not null
);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_quantity;
CREATE TABLE sm_quantity (
id serial PRIMARY KEY
, account_id integer not null
, period_id integer not null
, quantity float(8) not null
);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_hh_variables;
CREATE TABLE sm_hh_variables (
var_id serial PRIMARY KEY
, var_name varchar(32) not null
, var_type varchar(32));
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_d_variables;
CREATE TABLE sm_d_variables (
var_id serial PRIMARY KEY
, var_name varchar(32) not null
, var_type varchar(32));
"""
cur.execute(s)
conn.commit()
if False: # Creates new hh tariff variables in sm_hh_variables and sm_tariffs
product = 'AGILE-OUTGOING-19-05-13'
type_id=2
s = f"""
delete from sm_hh_variables where var_name like '{product}%';
delete from sm_tariffs where product='{product}';
"""
cur.execute(s)
conn.commit()
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"""
INSERT INTO sm_hh_variables (var_name) values ('{product}-{region}');
"""
cur.execute(s)
conn.commit()
s = f"select var_id from sm_hh_variables where var_name='{product}-{region}';"
cur.execute(s)
var_id = cur.fetchone()[0]
conn.commit()
s = f"""
INSERT INTO sm_tariffs (type_id, product, region, granularity_id, var_id) values
({type_id}, '{product}', '{region}', 0, {var_id});
"""
cur.execute(s)
conn.commit()
START='201901010000'
if False: #Inserts initial prices into hh tariff variables
import requests
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
for region in ['B','C','D','E','F','G','H','J','K','L','M','N','P']:
tariff = 'AGILE-OUTGOING-19-05-13'
url = ('https://api.octopus.energy/v1/products/{}/' +
'electricity-tariffs/E-1R-{}-{}/standard-unit-rates/' +
'?period_from={}Z&period_to={}Z&page_size=15000')
url = url.format(tariff, tariff, region,
df.timestamp.iloc[0].strftime('%Y-%m-%dT%H:%M'),
df.timestamp.iloc[-1].strftime('%Y-%m-%dT%H:%M'))
r = requests.get(url)
dfs = []
dfs.append(pd.DataFrame(r.json()['results'])[['valid_from','value_exc_vat']])
while r.json()['next'] is not None:
r = requests.get(r.json()['next'])
dfs.append(pd.DataFrame(r.json()['results'])[['valid_from','value_exc_vat']])
if len(dfs)>30:
raise Exception
dfs = pd.concat(dfs)
dfs['timestamp'] = pd.DatetimeIndex(dfs.valid_from.str[:-1])
dfs = df.merge(right=dfs, how='left', on='timestamp')
dfs = dfs[dfs.value_exc_vat.notna()]
s = f"select var_id from sm_hh_variables where var_name='{tariff}-{region}';"
cur.execute(s)
var_id = cur.fetchone()[0]
conn.commit()
print(f'{var_id} {tariff} {region}' )
s = """
delete from sm_hh_variable_vals where var_id={};
"""
s = s.format(var_id)
cur.execute(s)
conn.commit()
s = """
INSERT INTO sm_hh_variable_vals (var_id, period_id, value) values
"""
s = s.format(var_id)
for i, j in dfs.iterrows():
s+= " ({}, {}, {}),".format(var_id, i, j.value_exc_vat)
s = s[:-1] + ';'
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_hh_variable_vals;
CREATE TABLE sm_hh_variable_vals (
id serial primary key
, var_id integer not null
, period_id integer not null
, value float(8) not null);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_d_variable_vals;
CREATE TABLE sm_d_variable_vals (
id serial primary key
, var_id integer not null
, local_date date not null
, value float(8) not null);
"""
cur.execute(s)
conn.commit()
from myutils.utils import loadDataFromDb
if False: #Creates daily tracker variables
product = 'SILVER-2017-1'
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"""
insert into sm_d_variables (var_name) values ('{product}-{region}') returning var_id; """
var_id = loadDataFromDb(s)[0][0]
print(var_id)
s = f"""
insert into sm_tariffs (product, region, var_id, type_id, granularity_id) values
('{product}', '{region}', {var_id}, 1, 1); """
loadDataFromDb(s)
if False:
product = 'SILVER-2017-1'
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"select var_id from sm_variables where product='{product}' and region='{region}' ;"
var_id = loadDataFromDb(s)[0][0]
r = requests.get(f'https://octopus.energy/api/v1/tracker/G-1R-SILVER-2017-1-{region}/daily/past/540/1/')
dates = [x['date'] for x in r.json()['periods']]
prices = [x['unit_rate'] for x in r.json()['periods']]
d = pd.Series(prices, index=dates)
d = d[:datetime.date.today().strftime('%Y-%m-%d')]
d = d/1.05
d = d.round(2)
s = 'insert into sm_d_variable_vals (var_id, local_date, value) values '
for i, j in d.iteritems():
s+= f"({var_id}, '{i}', {j}),"
s = s[:-1]+';'
loadDataFromDb(s)
print(region)
if False:
conn.commit()
import requests
idx = pd.date_range(START, '202101010000', freq='30T')
    df = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import random
# Date and Time
# =============
print(datetime.datetime(2000, 1, 1))
print(datetime.datetime.strptime("2000/1/1", "%Y/%m/%d"))
print(datetime.datetime(2000, 1, 1, 0, 0).strftime("%Y%m%d"))
# to_datetime
# ===========
print(pd.to_datetime("4th of July"))
print(pd.to_datetime("13.01.2000"))
print(pd.to_datetime("7/8/2000"))
print(pd.to_datetime("7/8/2000", dayfirst=True))
print(issubclass(pd.Timestamp, datetime.datetime))
ts = pd.to_datetime(946684800000000000)
print(ts.year, ts.month, ts.day, ts.weekday())
index = [pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03")]
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts)
print(ts.index)
ts = pd.Series(np.random.randn(len(index)),
index=["2000-01-01", "2000-01-02", "2000-01-03"])
print(ts.index)
index = pd.to_datetime(["2000-01-01", "2000-01-02", "2000-01-03"])
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts.index)
print(pd.date_range(start="2000-01-01", periods=3, freq='H'))
print(pd.date_range(start="2000-01-01", periods=3, freq='T'))
print(pd.date_range(start="2000-01-01", periods=3, freq='S'))
print(pd.date_range(start="2000-01-01", periods=3, freq='B'))
print(pd.date_range(start="2000-01-01", periods=5, freq='1D1h1min10s'))
print(pd.date_range(start="2000-01-01", periods=5, freq='12BH'))
bh = pd.tseries.offsets.BusinessHour(start='07:00', end='22:00')
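# Hedged sketch (added; not in the original tutorial): two ways the custom
# business-hour offset above might be used. `bh` is the offset created above;
# out-of-hours timestamps are rolled forward to the next valid business hour.
print(pd.date_range(start="2000-01-01", periods=5, freq=bh))
print(pd.Timestamp("2000-01-01 23:00") + bh)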
#!/usr/bin/env python3
import re
import tqdm
import sqlite3
import matplotlib.pyplot as plt
import itertools
import pandas as pd
from ShortestPathDepParse import Dependencies
from EntitiesExtraction import EntityExtractor, Entity
from nltk import CoreNLPParser
import numpy as np
np.random.seed(17)
punctuation = r'[\.,;:\?!()\[\]\{\}«»\'\"\—’&\+]' #\- for now
digits = "([0-9])"
fr_stopwords = ["alors","au","aucuns","aussi","autre","avec","car","ce","cet","cela","ces","ceux","ci","comme","comment",
"dans","des","du","donc","elle","elles","en","est","et","eu","un", "une", "par", "plus", "moins", "aux",
"ici","il","ils","je","juste","la","le","les","leur","là","ma","mais","mes","mine","moins","mon","mot",
"ni","notre","nous","ou","où","parce","pas","peut","pour","quand","que","quel","quelle","quelles","on", "ont", "ne", "qu", "vers", "été",
"était", "être", "avant", "après", "jusque","jusqu","depuis", "avoir",
"quels","qui","sa","sans","ses","si","sien","son","sont","sous","sur","ta","tandis","tellement","tels","tes","ton","tous",
"tout","trop","très","tu","votre","vous","vu","ça","sa", "son", "ses", "de", "a"]
en_stopwords = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours",
"yourself", "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself",
"it", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which",
"who", "whom", "this", "that", "these", "those", "am", "is", "are", "was", "were", "be",
"been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an",
"the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for",
"with", "about", "against", "between", "into", "through", "during", "before", "after", "above",
"below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further",
"then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few",
"more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so",
"than", "too", "very", "s", "t", "can", "will", "just", "don", "should", "now", "even"]
pos_tagger = CoreNLPParser('http://localhost:9004', tagtype='pos')
#def pairwise(iterable):
# "s -> (s0,s1), (s1,s2), (s2, s3), ..."
# a, b = itertools.tee(iterable)
# next(b, None)
#return zip(a, b)
class PairOfEntitiesFeatures():
"""
For a given pair of entities in a sentence, find the features between them
Features for now include :
- surface form entity 1
- surface form entity 2
- type entity 1 (PER, ORG, LOC...)
- type entity 2 (PER, ORG, LOC...)
    - words between entities
- x words before the entity 1
- x words after entity 2
- shortest dependency path between two entities
Only function to use : get_features()
"""
def __init__(self, ent1, ent2, text, insecables):
self.ent1=ent1
self.ent2=ent2
self.text = text
self.ent1text = None
self.ent2text = None
self.ent1type= None
self.ent2type=None
self.dependency_graph = Dependencies(self.text, port = 9004, insecables=insecables)
self.words =None
self.words_before=None
self.words_after=None
self.shortest_dependency_path_w = None
self.shortest_dependency_path_p = None
self.shortest_dependency_path_t = None
        self.num_verbs = None  # number of verbs between the two entities
        self.num_punct = None  # number of punctuation signs between the entities (a high count suggests they sit in different clauses)
        self.num_sconj = None  # number of subordinating conjunctions (qui, que, tel que...)
self.N = 20
def _get_id_entities(self):
"""
Keep the id of the entities,
So that later we can go find their properties in the 'entities' table
"""
self.id1 = self.ent1.id
self.id2 = self.ent2.id
def _get_entities_surface_form(self):
self.ent1text = self.ent1.surface_form.replace(" "," ") #re.sub(r'[ ]+', " ", re.sub(punctuation, " ", self.ent1.text))
self.ent2text = self.ent2.surface_form.replace(" "," ") #re.sub(r'[ ]+', " ", re.sub(punctuation, " ", self.ent2.text))
def _get_type_entities(self):
self.ent1type = self.ent1.label
self.ent2type = self.ent2.label
#@profile
def _get_shortest_dependency_path(self):
self.shortest_dependency_path_w, self.shortest_dependency_path_p, self.shortest_dependency_path_t = self.dependency_graph.shortest_path(self.ent1text, self.ent2text, 0, 0)
#@profile
def get_features(self):
"""
Outputs the PairOfEntities object attributes as a list
List of :
- surface form entity 1
- surface form entity 2
- type entity 1 (PER, ORG, LOC...)
- type entity 2 (PER, ORG, LOC...)
        - words between entities
- x words before the entity 1
- x words after entity 2
- shortest dependency path between two entities
"""
self._get_entities_surface_form()
self._get_type_entities()
self._get_shortest_dependency_path()
self._get_id_entities()
pairfeatures = [self.id1, self.id2, self.ent1text, self.ent2text, self.ent1type, self.ent2type,
self.shortest_dependency_path_p, self.shortest_dependency_path_w, self.shortest_dependency_path_t, self.text]
return(pairfeatures)
#@profile
def clean_text(self, text):
"""
With the context given as list of strings (list of words), we return a list of stemmed words, removing stop words.
"""
text_list = [item.lower() for item in text if (item.lower() not in fr_stopwords)]
text_clean = [item for item in text_list if ((len(item)>1)&(re.search(digits, item)==None))] #re.search(punctuation, item)==None)&
return(text_clean)
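# Illustrative sketch (added; the entity offsets and ids below are made up, and the
# CoreNLP server must be running on port 9004): how PairOfEntitiesFeatures is
# normally driven from rows of the `entities` and `sentences` tables.
#   sentence = "Emmanuel Macron a rencontré Angela Merkel à Paris."
#   e1 = Entity(1, 15, 0, sentence, "personne", "wikidata-id-1")   # Entity(id, end, start, ...)
#   e2 = Entity(2, 41, 28, sentence, "personne", "wikidata-id-2")
#   insecables = ["Emmanuel Macron", "Angela Merkel"]
#   features = PairOfEntitiesFeatures(e1, e2, sentence, insecables).get_features()
#   # -> [id1, id2, surface forms, entity types, shortest dependency path
#   #     (POS tags / words / dependency labels), original sentence]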
class FeaturesCreationSQL():
def __init__(self, dbfile= "/home/cyrielle/Codes/clean_code/DataStorage/small_wiki/small_wiki.db", first_time=True):
self.db = dbfile
self.first_time = first_time
def data_sample_chose(self, ratio=1):
"""
Count how many ones we have in the entities pairs and where
"""
conn = sqlite3.connect(self.db)
cursor = conn.cursor()
cursor.execute("SELECT rowid FROM entities_pairs \
WHERE relation_id is not null\
AND entities_pairs.entity1_wikidata_id is not null\
AND entities_pairs.entity2_wikidata_id is not null")
indexes_1 = [c[0] for c in cursor.fetchall()]
cursor.execute("SELECT rowid FROM entities_pairs \
WHERE relation_id is null\
AND entities_pairs.entity1_wikidata_id is not null\
AND entities_pairs.entity2_wikidata_id is not null")
indexes_0_initial = [c[0] for c in cursor.fetchall()]
indexes_0 = list(np.random.choice(np.array(indexes_0_initial), 200000, replace=False)) #ratio*len(indexes_0_initial)
#all the indics we finall take in final :
indexes = indexes_0 + indexes_1
print(len(indexes_0), len(indexes_1))
return(indexes)
def execute_SQL(self, ratio, filepath):
"""
Given a list of pairs of entities, get the features of said pair
"""
id_pairs = self.data_sample_chose(ratio = ratio)
total_list =[]
conn = sqlite3.connect(self.db, isolation_level=None)
cursor = conn.cursor()
query = "SELECT entities_pairs.entity1_id, entities_pairs.entity2_id, ent1.start, ent1.end, ent1.entity_type, ent1.wikidata_id,\
ent2.start, ent2.end, ent2.entity_type, ent2.wikidata_id, sentences.text, sentences.rowid, entities_pairs.relation_id, entities_pairs.rowid\
FROM entities_pairs \
LEFT JOIN entities as ent1 ON entities_pairs.entity1_id==ent1.rowid \
LEFT JOIN entities as ent2 ON entities_pairs.entity2_id==ent2.rowid \
LEFT JOIN sentences ON sentences.rowid= entities_pairs.id_sentence\
WHERE entities_pairs.rowid in ({})".format(','.join(str(ind) for ind in id_pairs))
cursor.execute(query)
cursor2 = conn.cursor()
query2 ="SELECT entities.surface_form, entities.id_sentence FROM entities"
cursor2.execute(query2)
dic_fetch = 0
dic_entities_sentences = {}
while True:
dic_fetch = cursor2.fetchone()
if dic_fetch == None:
break
entity_surface_form, id_sent = dic_fetch
if id_sent in dic_entities_sentences :
dic_entities_sentences[id_sent].append(entity_surface_form)
else :
dic_entities_sentences[id_sent] = [entity_surface_form]
while True:
pair = cursor.fetchone()
if pair == None:
break
entid1, entid2, start1, end1, type1, wikidata_id1, start2, end2, type2, wikidata_id2, sentence, id_sentence, relation, id_pair = pair
#quick fix for weirdo sentences :
if "├" in sentence :
break
#elif len(list(re.finditer(',', sentence)))>7 : #very likely an enumeration, would probs no help
# break
if start1 < start2:
e1 = Entity(entid1, int(end1), int(start1), sentence, type1, wikidata_id1)
e2 = Entity(entid2, int(end2), int(start2), sentence, type2, wikidata_id2)
else :
e2 = Entity(entid1, int(end1), int(start1), sentence, type1, wikidata_id1)
e1 = Entity(entid2, int(end2), int(start2), sentence, type2, wikidata_id2)
#insecables are the entities that are made of several words (Saint-Exupery, Francois Hollande...) but that need to be seen as one node in the dependency graph
insecables = dic_entities_sentences[id_sentence]
feature_pair = PairOfEntitiesFeatures(e1, e2, sentence, insecables).get_features()
feature_pair.append(id_pair)
feature_pair.append(relation)
total_list.append(feature_pair)
feats = pd.DataFrame(total_list, columns= ["id1", "id2", 'entity1', 'entity2', 'ent1type', 'ent2type',
'shortest_dependency_path_p', "shortest_dependency_path_w","shortest_dependency_path_t",
"original_sentence","id_entities_pair",
"relation"])
entity_types = ["", "association",
"commune",
"date",
"departement",
"epci",
"institution",
"lieu",
"ong",
"organisation",
"partipolitique",
"pays",
"personne",
"region",
"societe",
"syndicat"]
feats_0 = feats[pd.isna(feats["relation"])]
feats_1 = feats[pd.notna(feats["relation"])]
#Get the max number of any combinations
print(feats_1.groupby(["ent1type", "ent2type"]).count())
lim1 = int(feats_1.groupby(["ent1type", "ent2type"]).count().quantile(0.95)[0])
print("Quartile 0.95 of the distribution of number of examples per couples of entities, label 1: ", lim1)
lim0 = lim1
#Now the selection of the types of entities
        df = pd.DataFrame()
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
| assert_almost_equal(df.values, values) | pandas.util.testing.assert_almost_equal |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#from sklearn import metrics
import scipy
# Linear regression with Adaline and the pseudo-inverse
entrada = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
target = np.array([2.26, 3.8, 4.43, 5.91, 6.18, 7.26, 8.15, 9.14, 10.87, 11.58, 12.55])
################## ADALINE TEST ############################
def y_liquid(entrada, peso, bias):
y = entrada * peso + bias
return y
def training(entrada, saida, alfa, iteracoes):
bias = np.random.rand(1)-0.5
peso = np.random.rand(1)-0.5
EQT = 100
EQT2 = 50
precisao = 0.000001
count = 0
#for i in range(iteracoes):
while abs(EQT - EQT2) > precisao:
EQT = EQT2
EQT2 = 0
count +=1
for i in range(len(entrada)):
y = y_liquid(entrada[i], peso, bias)
EQT2 += 0.5*((saida[i] - y)**2)
peso += entrada[i] * (saida[i] - y) * alfa
bias += (saida[i] - y) * alfa
print("Total de iteracoes: " + str(count))
print("Variação do Erro Quadratico Total: " + str(abs(EQT2-EQT)))
return peso, bias
def fit(entrada, peso, bias):
for i in range(len(entrada)):
y = y_liquid(entrada[i], peso, bias)
print("Para a entrada {0}, o Y calculado é {1}".format(entrada[i],y))
def metricas():
EMQ = 0
NUM = []
DEN = []
PN = []
PD = []
PD1 = []
Y = []
for i in range(len(entrada)):
y = y_liquid(entrada[i], peso, bias)
Y.append(y)
EMQ += (target[i] - y)**2/len(target)
NUM.append(((target[i] - y)**2))
DEN.append((target[i] - np.mean(target))**2)
PN.append((entrada[i] - np.mean(entrada)) * (target[i] - np.mean(target)))
PD.append((entrada[i] - np.mean(entrada)) ** 2)
PD1.append((target[i] - np.mean(target)) ** 2)
pearson = sum(PN) / (np.sqrt(sum(PD) * sum(PD1)))
R2 = 1 - (sum(NUM) / sum(DEN))
print("Mean squared error: " + str(EMQ))  # "MSE: " + str(metrics.mean_squared_error(target, Y)))
print("R-squared: " + str(R2))  # "R2: " + str(metrics.r2_score(target, Y)))
print("Pearson correlation coefficient: " + str(pearson) + '\n')
def plot():
x, y = | pd.Series(entrada, name="X") | pandas.Series |
"""Unit tests for orbitpy.coveragecalculator.gridcoverage class.
``TestGridCoverage`` class:
* ``test_execute_0``: Test format of output access files.
* ``test_execute_1``: Roll Circular sensor tests
* ``test_execute_2``: Yaw Circular sensor tests
* ``test_execute_3``: Pitch Circular sensor tests
* ``test_execute_4``: Roll Rectangular sensor tests
* ``test_execute_5``: Pitch Rectangular sensor tests
* ``test_execute_6``: Satellite-bus orientation vs sensor orientation tests
* ``test_execute_7``: Test spacecraft with multiple sensors.
* ``test_execute_8``: Test FOV vs FOR coverage. Coverage of FOR >= Coverage of FOV.
* ``test_execute_9``: Test coverage with DOUBLE_ROLL_ONLY maneuver will which result in 2 ``ViewGeometry`` objects for the field-of-regard.
"""
import json
import os, shutil
import sys
import unittest
import pandas as pd
import random
import warnings
from orbitpy.coveragecalculator import CoverageOutputInfo, GridCoverage
from orbitpy.grid import Grid
from orbitpy.util import Spacecraft
from orbitpy.propagator import PropagatorFactory
sys.path.append('../')
from util.spacecrafts import spc1_json, spc4_json, spc5_json
RE = 6378.137 # radius of Earth in kilometers
class TestGridCoverage(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create new working directory to store output of all the class functions.
cls.dir_path = os.path.dirname(os.path.realpath(__file__))
cls.out_dir = os.path.join(cls.dir_path, 'temp')
if os.path.exists(cls.out_dir):
shutil.rmtree(cls.out_dir)
os.makedirs(cls.out_dir)
# make propagator
factory = PropagatorFactory()
cls.step_size = 1
cls.j2_prop = factory.get_propagator({"@type": 'J2 ANALYTICAL PROPAGATOR', "stepSize": cls.step_size})
def test_from_dict(self):
o = GridCoverage.from_dict({ "grid":{"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2},
"spacecraft": json.loads(spc1_json),
"cartesianStateFilePath":"../../state.csv",
"@id": 12})
self.assertEqual(o._id, 12)
self.assertEqual(o._type, 'GRID COVERAGE')
self.assertEqual(o.grid, Grid.from_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2}))
self.assertEqual(o.spacecraft, Spacecraft.from_json(spc1_json))
self.assertEqual(o.state_cart_file, "../../state.csv")
def test_to_dict(self): #TODO
pass
def test_execute_0(self):
""" Check the produced access file format.
"""
# setup spacecraft with some parameters setup randomly
duration=0.05
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+random.uniform(350,850),
"ecc": 0, "inc": random.uniform(0,180), "raan": random.uniform(0,360),
"aop": random.uniform(0,360), "ta": random.uniform(0,360)}
}
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": random.uniform(5,35) },
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "@id":"bs1", "@type":"Basic Sensor"}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 1})
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access) # the first instrument, mode available in the spacecraft is considered for the coverage calculation.
# check the outputs
cov_calc_type = pd.read_csv(out_file_access, nrows=1, header=None).astype(str) # 1st row contains the coverage calculation type
cov_calc_type = str(cov_calc_type[0][0])
self.assertEqual(cov_calc_type, 'GRID COVERAGE')
epoch_JDUT1 = pd.read_csv(out_file_access, skiprows = [0], nrows=1, header=None).astype(str) # 2nd row contains the epoch
epoch_JDUT1 = float(epoch_JDUT1[0][0].split()[3])
self.assertEqual(epoch_JDUT1, 2458265.0)
_step_size = pd.read_csv(out_file_access, skiprows = [0,1], nrows=1, header=None).astype(str) # 3rd row contains the stepsize
_step_size = float(_step_size[0][0].split()[4])
self.assertAlmostEqual(_step_size, self.step_size)
_duration = pd.read_csv(out_file_access, skiprows = [0,1,2], nrows=1, header=None).astype(str) # 4th row contains the mission duration
_duration = float(_duration[0][0].split()[4])
self.assertAlmostEqual(_duration, duration)
column_headers = pd.read_csv(out_file_access, skiprows = [0,1,2,3], nrows=1, header=None).astype(str) # 5th row contains the columns headers
self.assertEqual(column_headers.iloc[0][0],"time index")
self.assertEqual(column_headers.iloc[0][1],"GP index")
self.assertEqual(column_headers.iloc[0][2],"lat [deg]")
self.assertEqual(column_headers.iloc[0][3],"lon [deg]")
# check that the grid indices are interpreted correctly
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
access_data = access_data.round(3)
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(lat==access_data['lat [deg]'].tolist())
self.assertTrue(lon==access_data['lon [deg]'].tolist())
else:
warnings.warn('No data was generated in test_execute_0(.). Run the test again.')
def test_execute_1(self):
""" Orient the sensor with roll, and an equatorial orbit and check that the ground-points captured are on either
side of hemisphere only. (Conical Sensor)
"""
############ Common attributes for both positive and negative roll tests ############
duration = 0.1
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 20,
"aop": 0, "ta": 120}
}
############ positive roll ############
# setup spacecraft with some parameters setup randomly
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_accessX.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
out_info = cov.execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": sat.get_instrument(None)._id,
"modeId": sat.get_instrument(None).get_mode_id()[0],
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x > 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_1(.) positive roll test. Run the test again.')
############ negative roll ############
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_accessY.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x < 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_1(.) negative roll test. Run the test again.')
def test_execute_2(self):
""" Orient the sensor with varying yaw but same pitch and roll, and test that the captured ground-points remain the same
(Conical Sensor).
"""
####### Common attributes for both simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 0,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
pitch = 15
roll = 10.5
######## Simulation 1 #######
yaw = random.uniform(0,360)
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
out_info = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": sat.get_instrument(None)._id,
"modeId": sat.get_instrument(None).get_mode_id()[0],
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 ########
yaw = random.uniform(0,360)
instrument_dict = {"mode":[{"@id":"m1", "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw}}],
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"sen1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
out_info = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": "sen1",
"modeId": "m1",
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
if not access_data1.empty:
(lat1, lon1) = grid.get_lat_lon_from_index(access_data1['GP index'].tolist())
(lat2, lon2) = grid.get_lat_lon_from_index(access_data2['GP index'].tolist())
self.assertTrue(lat1==lat2)
else:
warnings.warn('No data was generated in test_execute_2(.). Run the test again.')
def test_execute_3(self):
""" Orient the sensor with pitch and test that the times the ground-points are captured lag or lead (depending on direction of pitch)
as compared to the coverage from a zero pitch sensor. (Conical Sensor)
Fixed inputs used.
"""
####### Common attributes for all the simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 45, "raan": 245,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
grid.write_to_file(self.out_dir+'/grid.csv')
######## Simulation 1 #######
pitch = 0
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access1.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 #######
pitch = 25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access2.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 3 #######
pitch = -25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access3.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data3 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
# the first gpi in pitch forward pitch case is detected earlier than in the zero pitch case and (both) earlier than the pitch backward case
self.assertEqual(access_data3["GP index"][0], 1436)
self.assertEqual(access_data3["time index"][0], 51)
self.assertEqual(access_data1["GP index"][0], 1436)
self.assertEqual(access_data1["time index"][0], 91)
self.assertEqual(access_data2["GP index"][34], 1436)
self.assertEqual(access_data2["time index"][34], 123)
def test_execute_4(self):
""" Orient the sensor with roll, and an equatorial orbit and check that the ground-points captured are on either
side of hemisphere only. (Rectangular Sensor)
"""
############ Common attributes for both positive and negative roll tests ############
duration = 0.1
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 20,
"aop": 0, "ta": 120}
}
############ positive roll ############
# setup spacecraft with some parameters setup randomly
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":12.5},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_accessX.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
cov.execute(out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x > 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_4(.) positive roll test. Run the test again.')
############ negative roll ############
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_accessY.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x < 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_4(.) negative roll test. Run the test again.')
def test_execute_5(self):
""" Orient the sensor with pitch and test that the times the ground-points are captured lag or lead (depending on direction of pitch)
as compared to the coverage from a zero pitch sensor. (Rectangular Sensor)
Fixed inputs used.
"""
####### Common attributes for all the simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 45, "raan": 245,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
grid.write_to_file(self.out_dir+'/grid.csv')
######## Simulation 1 #######
pitch = 0
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
#factory = PropagatorFactory()
#prop = factory.get_propagator({"@type": 'J2 ANALYTICAL PROPAGATOR', "stepSize": 1})
#prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access1.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(out_file_access=out_file_access)
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 #######
pitch = 25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access2.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, out_file_access=out_file_access)
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 3 #######
pitch = -25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access3.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(mode_id=None, out_file_access=out_file_access)
access_data3 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
# the first gpi in pitch forward pitch case is detected earlier than in the zero pitch case and (both) earlier than the pitch backward case
self.assertEqual(access_data3["GP index"][0], 1436)
self.assertEqual(access_data3["time index"][0], 58)
self.assertEqual(access_data1["GP index"][0], 1436)
self.assertEqual(access_data1["time index"][0], 96)
self.assertEqual(access_data2["GP index"][25], 1436)
self.assertEqual(access_data2["time index"][25], 129)
def test_execute_6(self):
""" Check that (1) simulation with orienting spacecraft-body (bus) w.r.t NADIR_POINTING frame and sensor aligned to spacecraft-body yields the same results as
(2) simulation with orienting sensor w.r.t spacecraft-body and spacecraft-body aligned to NADIR_POINTING frame.
"""
############ Common attributes for both simulations ############
duration = 0.1
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+750,
"ecc": 0.001, "inc": 25, "raan": 120.56,
"aop": 0, "ta": 349}
}
pitch = 12
roll = -6
yaw = 240
############ simulation with orienting spacecraft w.r.t NADIR_POINTING frame and sensor aligned to spacecraft body ############
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw}}
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": 0, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 }, "@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "spacecraftBus": spacecraftBus_dict, "instrument": instrument_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access1.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
cov.execute(instru_id='bs1', out_file_access=out_file_access)
# check the outputs
access_data1 = | pd.read_csv(out_file_access, skiprows = [0,1,2,3]) | pandas.read_csv |
# Copyright 2018 Corti
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import progressbar
import multiprocessing
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
import librosa
from EndToEndClassification.Utilities import load_audio, load_processed_esc50
# note: the original code for processing the ESC-50 dataset by Piczak uses libc. This is not strictly necessary
# but potentially speeds-up processing. See commented-out code.
# from ctypes import cdll, CDLL
# cdll.LoadLibrary("libc.so.6")
# libc = CDLL("libc.so.6")
CPU_COUNT = multiprocessing.cpu_count()
# Note: most of the processing code below code adapted with permission from
# 'https://github.com/karoldvl/paper-2015-esc-convnet'.
# Note #2: the ported librosa v0.3.1. delta generation is added below for those interested in reproducing
# Piczak's original results with delta features added when using a recent librosa version (0.5.1).
def ESC50Processor(esc_50_path, destination_folder):
"""
Wrapper function for convenient processing. Subfolders are first created in the destination folder for (1) the
processed dataset and (2) the log-mel and raw waveform features (i.e. the segmented/augmented features).
One can use the internal functions directly if more flexibility is required.
Args:
esc_50_path (str): path to the esc50 data.
destination_folder (str): path to a destination folder.
"""
if not (os.path.isdir(esc_50_path) and os.path.isdir(destination_folder)):
raise ValueError('please provide valid paths to a source and a destination folder')
# make the necessary subfolders for processing
processed_esc50_path = os.path.join(destination_folder, 'processed_esc50')
os.mkdir(processed_esc50_path)
features_path = os.path.join(destination_folder, 'features')
os.mkdir(features_path)
features_raw_path = os.path.join(features_path, 'raw')
os.mkdir(features_raw_path)
features_logmel_path = os.path.join(features_path, 'spect')
os.mkdir(features_logmel_path)
_process_esc50(esc_50_path, processed_esc50_path)
_dump_features_processed_esc50_combined(processed_esc50_path, features_logmel_path, features_raw_path,
augmentations=4, frames=101, seed=41, batch_size=50)
print('done')
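# Example usage (assumed local paths): build the processed dataset and the
# log-mel/raw-waveform features in one call.
# ESC50Processor('/data/ESC-50', '/data/esc50_features')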
def _process_esc50(esc_50_path, save_path):
"""
Processes the 2000 5-sec clips of the esc50 dataset and dumps a pickle with the metadata for each audio file.
The sample rate is hard-coded to 22050.
Taken with permission from 'https://github.com/karoldvl/paper-2015-esc-convnet' with minor adaptions.
Args:
esc_50_path (str): path to the base folder containing the class-specific subfolders.
save_path (str): folder in which the esc50_audio.dat and the esc50_meta.pkl files will be saved.
"""
rows_meta = []
rows_audio = []
category_counter = 0
for directory in sorted(os.listdir(esc_50_path)):
directory = os.path.join(esc_50_path, directory)
if not (os.path.isdir(directory) and os.path.basename(directory)[0:3].isdigit()):
continue
print('Parsing ' + directory)
bar = progressbar.DataTransferBar(max_value=len(os.listdir(directory)))
for i, clip in enumerate(sorted(os.listdir(directory))):
if clip[-3:] != 'ogg':
continue
filepath = '{0}/{1}'.format(directory, clip)
filename = os.path.basename(filepath)
fold = filename[0]
category = category_counter
category_name = os.path.dirname(filepath).split('/')[-1]
rows_meta.append(
pd.DataFrame({'filename': filename, 'fold': fold, 'category': category, 'category_name': category_name},
index=[0]))
rows_audio.append(load_audio(filepath, 5000, framerate=22050, channel_nr=1))
bar.update(i)
bar.finish()
# libc.malloc_trim(0)
rows_meta = [pd.concat(rows_meta, ignore_index=True)]
rows_audio = [np.vstack(rows_audio)]
category_counter = category_counter + 1
rows_meta = rows_meta[0]
rows_meta[['category', 'fold']] = rows_meta[['category', 'fold']].astype(int)
rows_meta.to_pickle(os.path.join(save_path, 'esc50_meta.pkl'))
mm = np.memmap(os.path.join(save_path, 'esc50_audio.dat'), dtype='float32', mode='w+', shape=(2000, 110250))
mm[:] = rows_audio[0][:]
mm.flush()
del rows_audio
print('processed and saved')
def _dump_features_processed_esc50_combined(load_parsed_esc50, save_folder_path, save_folder_path_raw, augmentations=4,
frames=101, seed=41, batch_size=50):
"""
Generates ESC50 features from the 'processed' dataset. It does so according to the specifications in the paper.
Each of the 2000 5sec clips is cut into 50% overlapping segments. 4 augmentations are made of each.
Largely the same in implementation as the original Piczak code.
Args:
load_parsed_esc50 (str): folder containing the esc50_meta.pkl and esc50_audio.dat files.
save_folder_path (str): folder for saving logscaled mel features.
save_folder_path_raw (str): folder for saving raw waveform features.
augmentations (int): number of augmentations of each segment.
frames (int): nr of frames of the mel features.
seed (int): seed for pseudo RNG.
batch_size (int): batch size for multiprocessing (note, this has nothing to do with the minibatch size).
"""
np.random.seed(seed)
# checks
if isinstance(load_parsed_esc50, str):
meta, audio = load_processed_esc50(load_parsed_esc50)
else:
raise ValueError('load_parsed_esc50 should be a path to a folder')
if not (os.path.isdir(save_folder_path) and os.path.isdir(save_folder_path_raw)):
raise ValueError('please provide valid folders for saving the features')
segments = []
segments_raw = []
for b in range(len(audio) // batch_size + 1):
print('b:{}'.format(b))
start = b * batch_size
end = (b + 1) * batch_size
if end > len(audio):
end = len(audio)
seg_combined = Parallel(n_jobs=CPU_COUNT)(delayed(_extract_segments_combined)((
audio[i, :],
meta.loc[i, 'filename'],
meta.loc[i, 'fold'],
meta.loc[i, 'category'],
meta.loc[i, 'category_name'],
0,
frames
)) for i in range(start, end))
segments_batch = [seg[0] for seg in seg_combined]
segments_raw_batch = [seg[1] for seg in seg_combined]
segments.extend(segments_batch)
segments_raw.extend(segments_raw_batch)
for _ in range(augmentations):
seg_combined = Parallel(n_jobs=CPU_COUNT)(delayed(_extract_segments_combined)((
_augment_esc50(audio[i, :]),
meta.loc[i, 'filename'],
meta.loc[i, 'fold'],
meta.loc[i, 'category'],
meta.loc[i, 'category_name'],
1,
frames
)) for i in range(start, end))
segments_batch = [seg[0] for seg in seg_combined]
segments_raw_batch = [seg[1] for seg in seg_combined]
segments.extend(segments_batch)
segments_raw.extend(segments_raw_batch)
segments = [ | pd.concat(segments, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
"""Tests for `featureeng` package."""
import pytest
import pandas as pd
from featureeng import featureeng
from numpy.testing import assert_almost_equal, assert_equal
# @pytest.mark.xfail(reason ="Being Lazy, test_aggs_by_columns() has not yet been implemented")
def test_aggs_by_columns():
expected_data = pd.DataFrame()
expected_columns = []
expected_agg_list = []
expected_agg_cols = []
assert isinstance(expected_data, pd.DataFrame), "Expected Value = DataFrame"
assert isinstance(expected_columns, list), "Expected Value = list"
assert isinstance(expected_agg_list, list), "Expected Value = list"
assert isinstance(expected_agg_cols, list), "Expected Value = list"
assert_equal( isinstance(featureeng.aggs_by_columns(expected_data, expected_columns, expected_agg_list, expected_agg_cols), pd.DataFrame), True)
def test_frequency_encode():
expected_column_name = 'test_column'
expected_data = | pd.DataFrame(data=[[4,7], [4,7]], columns=[expected_column_name, 'column_two']) | pandas.DataFrame |
import calendar
from datetime import datetime
import ccxt
import numpy as np
import pandas as pd
from stockstats import StockDataFrame as Sdf
class CCXTEngineer:
def __init__(self):
self.binance = ccxt.binance()
def data_fetch(self, start, end, pair_list=["BTC/USDT"], period="1m"):
def min_ohlcv(dt, pair, limit):
since = calendar.timegm(dt.utctimetuple()) * 1000
ohlcv = self.binance.fetch_ohlcv(
symbol=pair, timeframe="1m", since=since, limit=limit
)
return ohlcv
def ohlcv(dt, pair, period="1d"):
ohlcv = []
limit = 1000
if period == "1m":
limit = 720
elif period == "1d":
limit = 1
elif period == "1h":
limit = 24
elif period == "5m":
limit = 288
for i in dt:
start_dt = i
since = calendar.timegm(start_dt.utctimetuple()) * 1000
if period == "1m":
ohlcv.extend(min_ohlcv(start_dt, pair, limit))
else:
ohlcv.extend(
self.binance.fetch_ohlcv(
symbol=pair, timeframe=period, since=since, limit=limit
)
)
df = pd.DataFrame(
ohlcv, columns=["time", "open", "high", "low", "close", "volume"]
)
df["time"] = [
datetime.fromtimestamp(float(time) / 1000) for time in df["time"]
]
df["open"] = df["open"].astype(np.float64)
df["high"] = df["high"].astype(np.float64)
df["low"] = df["low"].astype(np.float64)
df["close"] = df["close"].astype(np.float64)
df["volume"] = df["volume"].astype(np.float64)
return df
crypto_column = pd.MultiIndex.from_product(
[pair_list, ["open", "high", "low", "close", "volume"]]
)
first_time = True
for pair in pair_list:
start_dt = datetime.strptime(start, "%Y%m%d %H:%M:%S")
end_dt = datetime.strptime(end, "%Y%m%d %H:%M:%S")
start_timestamp = calendar.timegm(start_dt.utctimetuple())
end_timestamp = calendar.timegm(end_dt.utctimetuple())
if period == "1m":
date_list = [
datetime.utcfromtimestamp(float(time))
for time in range(start_timestamp, end_timestamp, 60 * 720)
]
else:
date_list = [
datetime.utcfromtimestamp(float(time))
for time in range(start_timestamp, end_timestamp, 60 * 1440)
]
df = ohlcv(date_list, pair, period)
if first_time:
dataset = pd.DataFrame(columns=crypto_column, index=df["time"].values)
first_time = False
temp_col = pd.MultiIndex.from_product(
[[pair], ["open", "high", "low", "close", "volume"]]
)
dataset[temp_col] = df[["open", "high", "low", "close", "volume"]].values
print("Actual end time: " + str(df["time"].values[-1]))
return dataset
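# Example usage (assumed dates/pairs): fetch hourly OHLCV candles from Binance.
# ce = CCXTEngineer()
# raw_df = ce.data_fetch("20210101 00:00:00", "20210102 00:00:00",
#                        pair_list=["BTC/USDT", "ETH/USDT"], period="1h")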
def add_technical_indicators(
self,
df,
pair_list,
tech_indicator_list=[
"macd",
"boll_ub",
"boll_lb",
"rsi_30",
"dx_30",
"close_30_sma",
"close_60_sma",
],
):
df = df.dropna()
df = df.copy()
column_list = [
pair_list,
["open", "high", "low", "close", "volume"] + (tech_indicator_list),
]
column = pd.MultiIndex.from_product(column_list)
index_list = df.index
dataset = pd.DataFrame(columns=column, index=index_list)
for pair in pair_list:
pair_column = pd.MultiIndex.from_product(
[[pair], ["open", "high", "low", "close", "volume"]]
)
dataset[pair_column] = df[pair]
temp_df = df[pair].reset_index().sort_values(by=["index"])
temp_df = temp_df.rename(columns={"index": "date"})
crypto_df = Sdf.retype(temp_df.copy())
for indicator in tech_indicator_list:
temp_indicator = crypto_df[indicator].values.tolist()
dataset[(pair, indicator)] = temp_indicator
print("Succesfully add technical indicators")
return dataset
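# Example (assumed to follow data_fetch above): append stockstats indicators per pair.
# df_ind = ce.add_technical_indicators(raw_df, pair_list=["BTC/USDT", "ETH/USDT"])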
def df_to_ary(
self,
df,
pair_list,
tech_indicator_list=[
"macd",
"boll_ub",
"boll_lb",
"rsi_30",
"dx_30",
"close_30_sma",
"close_60_sma",
],
):
df = df.dropna()
date_ary = df.index.values
price_array = df[ | pd.MultiIndex.from_product([pair_list, ["close"]]) | pandas.MultiIndex.from_product |
from typing import List, Text, Dict
from dataclasses import dataclass
import ssl
import urllib.request
from io import BytesIO
from zipfile import ZipFile
from urllib.parse import urljoin
from logging import exception
import os
from re import findall
from datetime import datetime, timedelta
import lxml.html as LH
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
from selenium.webdriver.support.ui import WebDriverWait
import warnings
import string
import re
from bs4 import BeautifulSoup
import requests
import glob
from fake_useragent import UserAgent
import brFinance.utils as utils
import pickle
ssl._create_default_https_context = ssl._create_unverified_context
warnings.simplefilter(action='ignore', category=FutureWarning)
@dataclass
class SearchENET:
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx according to the input parameters
"""
def __init__(self, cod_cvm: int = None, category: int = None, driver: utils.webdriver = None):
self.driver = driver
# self.cod_cvm_dataframe = self.cod_cvm_list()
self.cod_cvm = cod_cvm
if cod_cvm is not None:
self.check_cod_cvm_exist(self.cod_cvm)
self.category = category
if category is not None:
self.check_category_exist(self.category)
def cod_cvm_list(self) -> pd.DataFrame:
"""
Returns a dataframe of all CVM codes and Company names availble at https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx
"""
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx")
#wait_pageload()
for retrie in range(50):
try:
html = str(driver.find_element_by_id('hdnEmpresas').get_attribute("value"))
listCodCVM = re.findall("(?<=\_)(.*?)(?=\')", html)
listNomeEmp = re.findall("(?<=\-)(.*?)(?=\')", html)
codigos_cvm = pd.DataFrame(list(zip(listCodCVM, listNomeEmp)),
columns=['codCVM', 'nome_empresa'])
codigos_cvm['codCVM'] = pd.to_numeric(codigos_cvm['codCVM'])
if len(codigos_cvm.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
if self.driver is None:
driver.quit()
return codigos_cvm
def check_cod_cvm_exist(self, cod_cvm) -> bool:
codigos_cvm_available = self.cod_cvm_list()
cod_cvm_exists = str(cod_cvm) in [str(cod_cvm_aux) for cod_cvm_aux in codigos_cvm_available['codCVM'].values]
if cod_cvm_exists:
return True
else:
raise ValueError('The provided CVM code was not found.')
def check_category_exist(self, category) -> bool:
search_categories_list = [21, 39]
if category in search_categories_list:
return True
else:
raise ValueError('Invalid category value. Available categories are:', search_categories_list)
@property
def search(self) -> pd.DataFrame:
"""
Returns dataframe of search results including cod_cvm, report's url, etc.
"""
dataInicial = '01012010'
dataFinal = datetime.today().strftime('%d%m%Y')
option_text = str(self.category)
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx?codigoCVM={str(self.cod_cvm)}")
# Wait and click cboCategorias_chosen
for errors in range(10):
try:
driver.find_element_by_id('cboCategorias_chosen').click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath(
f"//html/body/form[1]/div[3]/div/fieldset/div[5]/div[1]/div/div/ul/li[@data-option-array-index='{option_text}']").click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath("//html/body/form[1]/div[3]/div/fieldset/div[4]/div[1]/label[4]").click()
break
except:
time.sleep(1)
# Wait and send keys txtDataIni
for errors in range(10):
try:
driver.find_element_by_id('txtDataIni').send_keys(dataInicial)
break
except:
time.sleep(1)
# Wait and send keys txtDataFim
for errors in range(10):
try:
driver.find_element_by_id('txtDataFim').send_keys(dataFinal)
break
except:
time.sleep(1)
# Wait and click btnConsulta
for errors in range(10):
try:
driver.find_element_by_id('btnConsulta').click()
break
except:
time.sleep(1)
# Wait html table load the results (grdDocumentos)
for errors in range(10):
try:
table_html = pd.read_html(str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML")))[-1]
if len(table_html.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
table_html = str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML"))
table = LH.fromstring(table_html)
results = pd.read_html(table_html)
for df_result in results:
if len(df_result.index) > 0:
pattern = "OpenPopUpVer(\'(.*?)\')"
df_result['linkView'] = table.xpath('//tr/td/i[1]/@onclick')
df_result['linkDownload'] = table.xpath('//tr/td/i[2]/@onclick')
df_result['linkView'] = "https://www.rad.cvm.gov.br/ENET/" + \
df_result['linkView'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False)
df3 = df_result['linkDownload'].str.split(',', expand=True)
df3.columns = ['COD{}'.format(x+1) for x in df3.columns]
df_result = df_result.join(df3)
df_result['linkDownload'] = "https://www.rad.cvm.gov.br/ENET/frmDownloadDocumento.aspx?Tela=ext&numSequencia=" + \
df_result['COD1'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&numVersao=" + df_result['COD2'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&numProtocolo=" + df_result['COD3'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&descTipo=" + df_result['COD4'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&CodigoInstituicao=1"
df_result = df_result[['Código CVM', 'Empresa', 'Categoria', 'Tipo', 'Espécie',
'Data Referência', 'Data Entrega', 'Status', 'V', 'Modalidade',
'linkView', 'linkDownload']]
df_result['Data Referência'] = df_result['Data Referência'].str.split(
' ', 1).str[1]
df_result['Data Referência'] = pd.to_datetime(
df_result["Data Referência"], format="%d/%m/%Y", errors="coerce")
df_result = df_result[df_result["Status"] == "Ativo"]
df_result["Código CVM"] = self.cod_cvm
df_result = df_result[['Código CVM', 'Empresa', 'Categoria', 'Tipo', 'Espécie',
'Data Referência', 'Data Entrega', 'Status', 'V', 'Modalidade',
'linkView', 'linkDownload']]
df_result = df_result.reset_index(drop=True)
break
if self.driver is None:
driver.quit()
print(f"Resultados da busca ENET: {len(df_result.index)}")
return df_result
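# Example usage (assumed): list the annual reports (category 21) filed by Petrobras (CVM code 9512).
# results = SearchENET(cod_cvm=9512, category=21).search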
@dataclass
class FinancialReport:
def __init__(self, link: str, driver: utils.webdriver = None):
self.link = link
self.driver = driver
@property
def financial_reports(self) -> Dict:
"""
Returns a dict with the document metadata (reference date, version, CVM code) and, under the
"reports" key, one dataframe per financial statement available on the ENET report page
(e.g. "Demonstração do Fluxo de Caixa", "Demonstração das Mutações do Patrimônio Líquido").
"""
link = self.link
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
erros = 0
max_retries = 10
dictDemonstrativos = None
while erros < max_retries:
try:
print("Coletando dados do link:", link)
driver.get(link)
# Wait page load the reports
for retrie in range(max_retries):
# Once the captcha is solved, options_text holds the available financial-statement options
options_text = [x.get_attribute("text") for x in driver.find_element_by_name(
"cmbQuadro").find_elements_by_tag_name("option")]
if len(options_text) > 0:
break
else:
time.sleep(1)
# Iterate over the statements and store each dataframe in the dictionary
refDate = driver.find_element_by_id('lblDataDocumento').text
versaoDoc = driver.find_element_by_id(
'lblDescricaoCategoria').text.split(" - ")[-1].replace("V", "")
report = {"ref_date": refDate,
"versao": int(versaoDoc),
"cod_cvm": int(driver.find_element_by_id('hdnCodigoCvm').get_attribute("value"))
}
dictDemonstrativos = {}
for demonstrativo in options_text:
print(demonstrativo)
driver.find_element_by_xpath("//select[@name='cmbQuadro']/option[text()='{option_text}']".format(option_text=demonstrativo)).click()
iframe = driver.find_element_by_xpath(
"//iframe[@id='iFrameFormulariosFilho']")
driver.switch_to.frame(iframe)
html = driver.page_source
if demonstrativo == "Demonstração do Fluxo de Caixa":
index_moeda = -2
else:
index_moeda = -1
moedaUnidade = driver.find_element_by_id(
'TituloTabelaSemBorda').text.split(" - ")[index_moeda].replace("(", "").replace(")", "")
if demonstrativo == "Demonstração das Mutações do Patrimônio Líquido":
df = pd.read_html(html, header=0, decimal=',')[1]
converters = {c: lambda x: str(x) for c in df.columns}
df = pd.read_html(html, header=0, decimal=',',
converters=converters)[1]
else:
df = pd.read_html(html, header=0, decimal=',')[0]
converters = {c: lambda x: str(x) for c in df.columns}
df = pd.read_html(html, header=0, decimal=',',
converters=converters)[0]
for ind, column in enumerate(df.columns):
if column.strip() != "Conta" and column.strip() != "Descrição":
df[column] = df[column].astype(
str).str.strip().str.replace(".", "")
df[column] = pd.to_numeric(df[column], errors='coerce')
else:
df[column] = df[column].astype(
'str').str.strip().astype('str')
# Keep only the first column of values (the most recent statement) and rename it to "Valor"
if demonstrativo != "Demonstração das Mutações do Patrimônio Líquido":
df = df.iloc[:, 0:3]
df.set_axis([*df.columns[:-1], 'Valor'],
axis=1, inplace=True)
# Add the reference date and version to the dataframes
df["refDate"] = refDate
df["versaoDoc"] = versaoDoc
df["moedaUnidade"] = moedaUnidade
df["refDate"] = pd.to_datetime(df["refDate"], errors="coerce")
# Add to the statements dictionary
dictDemonstrativos[demonstrativo] = df
driver.switch_to.default_content()
print("-"*60)
# Add the reference date to the statements dict
report["reports"] = dictDemonstrativos
break
except Exception as exp:
print("Erro ao carregar demonstrativo. Tentando novamente...")
print(str(exp))
erros += 1
continue
if self.driver is None:
driver.quit()
return report
@dataclass
class Company:
def __init__(self, cod_cvm: int):
self.cod_cvm = cod_cvm
def obtemCompCapitalSocial(self):
self.ComposicaoCapitalSocial = composicao_capital_social([self.cod_cvm])
def obterDadosCadastrais(self):
listaCodCVM = obtemDadosCadastraisCVM(codCVM=self.cod_cvm)
listaCodCVM = listaCodCVM[listaCodCVM["CD_CVM"] == self.cod_cvm]
self.dadosCadastrais = listaCodCVM.to_dict('r')
@property
def reports(self) -> List:
driver = utils.Browser.run_chromedriver()
search_anual_reports = SearchENET(cod_cvm=self.cod_cvm, category=21, driver=driver).search
search_quarter_reports = SearchENET(cod_cvm=self.cod_cvm, category=39, driver=driver).search
search_reports_result = search_anual_reports.append(search_quarter_reports)
reports = {}
for index, report_info in search_reports_result.iterrows():
m = re.search(r"(?<=\Documento=)(.*?)(?=\&)", report_info['linkView'])
if m:
document_number = m.group(1)
# Create folder and save reports locally
path_save_reports = f'{os.getcwd()}/reports'
report_file = f'{path_save_reports}/{document_number}.plk'
utils.File.create_folder(path_save_reports)
# Check if report is available locally, otherwise scrape it.
if utils.File.check_exist(report_file):
with open(report_file, 'rb') as load_report:
report_obj = pickle.load(load_report)
print("Carregado localmente!")
else:
report_obj = FinancialReport(link=report_info["linkView"], driver=driver).financial_reports
with open(report_file, 'wb') as save_report:
pickle.dump(report_obj, save_report)
reports[report_obj["ref_date"]] = report_obj["reports"]
driver.quit()
return reports
if __name__ == '__main__':
petrobras = Company(cod_cvm=9512)
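    # Illustrative follow-up calls (not part of the original script); fetching reports needs a
    # working chromedriver and network access and can take several minutes:
    #   petrobras.obterDadosCadastrais()
    #   all_reports = petrobras.reports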
def obtemDadosCadastraisCVM(compAtivas=True, codCVM=False):
"""
Returns a dataframe of Registration data for all Companies available at http://dados.cvm.gov.br/dados/CIA_ABERTA/CAD/DADOS/cad_cia_aberta.csv
"""
url = "http://dados.cvm.gov.br/dados/CIA_ABERTA/CAD/DADOS/cad_cia_aberta.csv"
#s = requests.get(url).content
dados_cadastrais_empresas = pd.read_csv(url, sep=";", encoding="latin")
if compAtivas:
dados_cadastrais_empresas = dados_cadastrais_empresas[
dados_cadastrais_empresas["SIT"] == "ATIVO"]
if codCVM:
dados_cadastrais_empresas = dados_cadastrais_empresas[dados_cadastrais_empresas["CD_CVM"] == int(
codCVM)]
return dados_cadastrais_empresas
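# Example usage (illustrative only; downloads the registry CSV from the CVM open-data portal):
#   todas_ativas = obtemDadosCadastraisCVM()            # every active company
#   uma_empresa = obtemDadosCadastraisCVM(codCVM=9512)  # filter to a single CVM code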
def composicao_capital_social(codCVM):
"""
    This method will be deprecated
"""
dfQuantPapeis = pd.DataFrame()
for cod in codCVM:
erro = 1
cod = str(cod)
while erro <= 3:
try:
print(cod)
url = "http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM={codCVM}".format(
codCVM=cod)
#
html_content = requests.get(url).content.decode("utf8")
tableDados = BeautifulSoup(html_content, "lxml").find(
"div", attrs={"id": "accordionDados"})
tickers = re.findall(
"'[a-z|A-Z|0-9][a-z|A-Z|0-9][a-z|A-Z|0-9][a-z|A-Z|0-9][0-9][0-9]?'", str(tableDados))
tickers = [ticker.replace("'", "") for ticker in tickers]
tickers = list(dict.fromkeys(tickers))
tickers = pd.DataFrame(tickers, columns=['ticker'])
tickers["codCVM"] = cod
dicCapitalSocial = BeautifulSoup(html_content, "lxml").find(
"div", attrs={"id": "divComposicaoCapitalSocial"})
dfs = pd.read_html(str(dicCapitalSocial), thousands='.')[0]
dfs.columns = ["Tipo", "Quantidade"]
dfs["codCVM"] = cod
dfs["dt_load"] = datetime.now()
dfs = tickers.merge(dfs, on="codCVM")
print(dfs)
dfQuantPapeis = dfQuantPapeis.append(dfs)
break
except Exception as exp:
print("Tentando novamente:", cod)
print(str(exp))
erro += 1
print("*"*50)
return dfQuantPapeis
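# Example usage (illustrative only; expects an iterable of CVM codes and scrapes bovespa pages,
# so it needs network access):
#   capital_social = composicao_capital_social([9512])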
def obter_dados_negociacao(dateToday=datetime.now().strftime("%Y-%m-%d")):
print(dateToday)
url = f"https://arquivos.b3.com.br/api/download/requestname?fileName=InstrumentsConsolidated&date={dateToday}"
payload = {}
ua = UserAgent()
headers = {
'User-Agent': str(ua.chrome)}
response = requests.request("GET", url, headers=headers, data=payload)
if response.ok:
token = response.json().get('token')
baseURL = f"https://arquivos.b3.com.br/api/download/?token={token}"
data = pd.read_csv(baseURL,
sep=";",
encoding='latin-1',
error_bad_lines=True)
data["data_load"] = datetime.now()
print("Baixando arquivo!")
r = urllib.request.urlopen(
"https://sistemaswebb3-listados.b3.com.br/isinProxy/IsinCall/GetFileDownload/NDY0ODk=").read()
print("Descompactando arquivo!")
file = ZipFile(BytesIO(r))
dfEmissor = file.open("EMISSOR.TXT")
print("Abrindo arquivo CSV!")
dfEmissor = pd.read_csv(dfEmissor, header=None, names=[
"CODIGO DO EMISSOR", "NOME DO EMISSOR", "CNPJ", "DATA CRIAÇÃO EMISSOR"])
data = data.merge(dfEmissor, left_on="AsstDesc",
right_on="CODIGO DO EMISSOR", how="left")
data.reset_index(drop=True, inplace=True)
else:
data = None
return data
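# Example usage (illustrative only; the B3 download API needs network access and may not have
# files available for non-trading days):
#   negociacao = obter_dados_negociacao("2021-06-01")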
def obtemCodCVM():
url = "https://cvmweb.cvm.gov.br/SWB/Sistemas/SCW/CPublica/CiaAb/ResultBuscaParticCiaAb.aspx?CNPJNome=&TipoConsult=C"
print(url)
tableDados = | pd.read_html(url, header=0) | pandas.read_html |
import flask
from flask import request
import pandas as pd
import spacy
import nltk
import numpy as np
from sklearn.cluster import KMeans
import os
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
import gensim
from gensim import corpora
from sklearn import svm
from sklearn.linear_model import LogisticRegression
app = flask.Flask(__name__)
app.config["DEBUG"] = True
nltk.download('stopwords')
nlp = spacy.load('de_core_news_sm')
STOPWORDS = set(stopwords.words('german'))
def save_obj(obj, name ):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
@app.route('/api/load', methods=['GET'])
def load():
global documents
documents = load_obj('documents')
return "Database loaded!"
@app.route('/api/save', methods=['GET'])
def save():
save_obj(documents, 'documents')
return "Database saved!"
documents = {}
model = None
tfidf = None
if not os.path.exists('obj'):
os.makedirs('obj')
elif os.path.exists('obj/documents.pkl'):
print("Loading data and models")
    load()
@app.route('/', methods=['GET'])
def home():
str = "<h1>Document Clustering and Classification Web API</h1>"
return str
@app.route('/api/add', methods=['POST'])
def api_add():
request_data = request.get_json()
id = request_data['id']
if id not in documents.keys():
documents[id] = {}
documents[id]['text'] = request_data['text']
documents[id]['tags'] = request_data['tags']
if "class" in request_data.keys():
documents[id]['class'] = request_data['class']
documents[id]['fixed'] = True
return "Document added!"
@app.route('/api/debug', methods=['GET'])
def api_debug():
return pd.DataFrame.from_dict(documents, orient='index').to_html()
@app.route('/api/delclasses', methods=['GET'])
def api_del_classes():
for key in documents.keys():
if "class" in documents[key].keys():
del documents[key]['class']
return "Deleted all classes!"
@app.route('/api/initpreprocess', methods=['GET'])
def initpreprocess():
    i = 0  # number of documents processed so far (incremented after each processed document)
for key in documents.keys():
if 'tokens' not in documents[key].keys():
# Lemmatize
doc = nlp(documents[key]['text'])
result = []
for token in doc:
str_token = str(token)
if not (str_token.startswith("http://") or str_token.startswith("https://") or len(str_token.strip()) <= 1 or '\\n' in str_token or '\n' in str_token):
lemma = token.lemma_.lower()
if not lemma in STOPWORDS:
result.append(lemma)
result = result + documents[key]['tags']
documents[key]['tokens'] = result
i += 1
print("Processing document {} of {}".format(str(i), len(documents.keys())))
documents_df = | pd.DataFrame.from_dict(documents, orient='index') | pandas.DataFrame.from_dict |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
''' Load and merge two CSV files - one containing messages and the other containing categories
Args:
messages_filepath (str): Path to the CSV file containing messages
categories_filepath (str): Path to the CSV file containing categories of each message
Returns:
df (DataFrame): A merged DataFrame containing messages and categories
'''
# Load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
import warnings
from collections import defaultdict
from itertools import cycle
import numpy as np   # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
# dependencies for plotting
import matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
from pylab import rcParams
from sympy import *
# dependencies for statistic analysis
from scipy import interp, stats
import xgboost
import xgboost as xgb
from xgboost import XGBClassifier
import sklearn
# NOTE: sklearn.cross_validation, sklearn.grid_search and sklearn.externals are deprecated
# modules kept from the original script; they require an older scikit-learn release.
from sklearn import cross_validation, datasets, linear_model, metrics, preprocessing, svm
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_selection import RFE, RFECV, SelectKBest, chi2
from sklearn.grid_search import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression, RandomizedLogisticRegression
from sklearn.metrics import (accuracy_score, auc, classification_report, confusion_matrix,
                             f1_score, precision_score, roc_curve)
from sklearn.model_selection import (GridSearchCV, StratifiedKFold, cross_val_score,
                                     train_test_split, validation_curve)
# the original script's later imports shadow the model_selection versions of these two names:
from sklearn.cross_validation import KFold, ShuffleSplit
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
warnings.filterwarnings('ignore')
seed = 2008
np.random.seed(seed)
##read in the data
shared = pd.read_table("data/baxter.0.03.subsample.shared")
shared.head()
meta = pd.read_table("data/metadata.tsv")
##check and visualize the data
meta.head()
shared.head()
## remove unnecessary columns from meta
meta = meta[['sample','dx']]
##rename the column name "Group" to match the "sample" in meta
shared = shared.rename(index=str, columns={"Group":"sample"})
##merge the 2 datasets on sample
data= | pd.merge(meta,shared,on=['sample']) | pandas.merge |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, sparse=sparse)
expected = DataFrame(
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
dtype=np.uint8,
)
if sparse:
expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": SparseArray([0, 1, 0], dtype="uint8"),
"B_b": SparseArray([1, 1, 0], dtype="uint8"),
"B_c": SparseArray([0, 0, 1], dtype="uint8"),
}
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
}
)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ["from_A", "from_B"]
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected = expected[["C"] + cols]
typ = SparseArray if sparse else Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix="bad", sparse=sparse)
bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"]
expected = DataFrame(
[[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]],
columns=["C"] + bad_columns,
dtype=np.uint8,
)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat(
[
Series([1, 2, 3], name="C"),
Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
],
axis=1,
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse)
expected = DataFrame(
{
"B": ["b", "b", "c"],
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep="..", sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"A..a": [1, 0, 1],
"A..b": [0, 1, 0],
"B..b": [1, 1, 0],
"B..c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse)
expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"})
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=["too few"], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=["bad"], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {"A": "from_A", "B": "from_B"}
df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
}
)
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index(
axis=1
)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_a": arr([1, 0, 1, 0], dtype=typ),
"A_b": arr([0, 1, 0, 0], dtype=typ),
"A_nan": arr([0, 0, 0, 1], dtype=typ),
"B_b": arr([1, 1, 0, 0], dtype=typ),
"B_c": arr([0, 0, 1, 0], dtype=typ),
"B_nan": arr([0, 0, 0, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
"cat_x": arr([1, 0, 0], dtype=typ),
"cat_y": arr([0, 1, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"get_dummies_kwargs,expected",
[
(
{"data": DataFrame({"ä": ["a"]})},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["ä"]})},
DataFrame({"x_ä": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
DataFrame({"xäa": [1]}, dtype=np.uint8),
),
],
)
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list("aaa")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected = DataFrame(index=list("ABC"))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ["a", "b", np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({"b": [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse)
exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=np.uint8).reindex(
["b", np.nan], axis=1
)
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies(
[np.nan], dummy_na=True, drop_first=True, sparse=sparse
)
exp_just_na = DataFrame(index=np.arange(1))
tm.assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=np.uint8)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame(
{"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]}
)
cols = ["A_b", "B_c", "cat_y"]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[["C", "A_b", "B_c", "cat_y"]]
if sparse:
for col in cols:
expected[col] = SparseArray(expected[col])
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(
df, dummy_na=True, drop_first=True, sparse=sparse
).sort_index(axis=1)
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_b": [0, 1, 0, 0],
"A_nan": [0, 0, 0, 1],
"B_c": [0, 0, 1, 0],
"B_nan": [0, 0, 0, 1],
}
)
cols = ["A_b", "A_nan", "B_c", "B_nan"]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = SparseArray(expected[col])
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True, sparse=sparse)
expected = expected[["C", "A_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_get_dummies_int_int(self):
data = Series([1, 2, 1])
result = get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(Categorical(["a", "b", "a"]))
result = | get_dummies(data) | pandas.get_dummies |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#For User 1
User_1 = pd.read_csv('acceleration_labelled_data.csv')
User_1 = pd.DataFrame(User_1.iloc[:, 1:6].values)
User_1.columns = ["Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
User_1["Timeframe"] = User_1["Timeframe"] - 0.017856
"""Export_csv = User_1.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/User_1.csv')
"""
#For User 2
User_2 = pd.read_csv('acceleration.csv')
User_2 = pd.DataFrame(User_2.iloc[:, 0:4].values)
User_2.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_2.insert(0, "Activity", "", True)
User_2_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_2.insert(5, "Timedifference", "", True)
User_2 = User_2.to_numpy()
for i in range(1, len(User_2)):
    User_2[i][5] = User_2[i][1] - User_2[i-1][1]
User_2 = pd.DataFrame(User_2)
User_2.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_2 = User_2.to_numpy()
User_2_annotations = User_2_annotations.to_numpy()
for i in range(0, len(User_2_annotations)):
    for j in range(0, len(User_2)):
        if (User_2[j][1] > User_2_annotations[i][0]) and (User_2[j][1] < User_2_annotations[i][1]):
            User_2[j][0] = User_2_annotations[i][2]
User_2 = pd.DataFrame(User_2)
User_2.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#dropping empty dataframes at start and end
User_2 = User_2.iloc[446:32897,]
#exporting file
Export_User2_csv = User_2.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/User_2.csv')
#For User 3
User_3 = pd.read_csv('acceleration.csv')
User_3 = pd.DataFrame(User_3.iloc[:, 0:4].values)
User_3.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_3.insert(0, "Activity", "", True)
User_3_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_3.insert(5, "Timedifference", "", True)
User_3 = User_3.to_numpy()
for i in range(1, len(User_3)):
User_3[i][5] = User_3[i][1] - User_3[i-1][1]
User_3 = pd.DataFrame(User_3)
User_3.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_3 = User_3.to_numpy()
User_3_annotations = User_3_annotations.to_numpy()
for i in range(0, len(User_3_annotations)):
for j in range(0, len(User_3)):
if (User_3[j][1] > User_3_annotations[i][0]) and (User_3[j][1] < User_3_annotations[i][1]):
User_3[j][0] = User_3_annotations[i][2]
User_3 = pd.DataFrame(User_3)
User_3.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_3_annotations = pd.DataFrame(User_3_annotations)
User_3_annotations.columns = ["Start", "End", "Activity", "Type"]
#dropping empty dataframes at start and end
User_3 = User_3.iloc[1604:31228,]
#exporting file
Export_User3_csv = User_3.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_3.csv')
#For User 4
User_4 = pd.read_csv('acceleration.csv')
User_4 = pd.DataFrame(User_4.iloc[:, 0:4].values)
User_4.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_4.insert(0, "Activity", "", True)
User_4_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_4.insert(5, "Timedifference", "", True)
User_4 = User_4.to_numpy()
for i in range(1, len(User_4)):
User_4[i][5] = User_4[i][1] - User_4[i-1][1]
User_4 = pd.DataFrame(User_4)
User_4.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_4 = User_4.to_numpy()
User_4_annotations = User_4_annotations.to_numpy()
for i in range(0, len(User_4_annotations)):
for j in range(0, len(User_4)):
if (User_4[j][1] > User_4_annotations[i][0]) and (User_4[j][1] < User_4_annotations[i][1]):
User_4[j][0] = User_4_annotations[i][2]
User_4 = pd.DataFrame(User_4)
User_4.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_4_annotations = pd.DataFrame(User_4_annotations)
User_4_annotations.columns = ["Start", "End", "Activity", "index"]
#dropping empty dataframes at start and end
User_4 = User_4.iloc[562:30679,]
#exporting file
Export_User4_csv = User_4.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_4.csv')
#For User 5
User_5 = pd.read_csv('acceleration.csv')
User_5 = pd.DataFrame(User_5.iloc[:, 0:4].values)
User_5.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_5.insert(0, "Activity", "", True)
User_5_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_5.insert(5, "Timedifference", "", True)
User_5 = User_5.to_numpy()
for i in range(1, len(User_5)):
User_5[i][5] = User_5[i][1] - User_5[i-1][1]
User_5 = pd.DataFrame(User_5)
User_5.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_5 = User_5.to_numpy()
User_5_annotations = User_5_annotations.to_numpy()
for i in range(0, len(User_5_annotations)):
for j in range(0, len(User_5)):
if (User_5[j][1] > User_5_annotations[i][0]) and (User_5[j][1] < User_5_annotations[i][1]):
User_5[j][0] = User_5_annotations[i][2]
User_5 = pd.DataFrame(User_5)
User_5.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_5_annotations = pd.DataFrame(User_5_annotations)
User_5_annotations.columns = ["Start", "End", "Activity", "index"]
#dropping empty dataframes at start and end
User_5 = User_5.iloc[950:30633,]
#exporting file
Export_User5_csv = User_5.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_5.csv')
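# NOTE: the per-user blocks in this script repeat the same steps (load the acceleration CSV,
# compute per-row time differences, then label each row from the annotation windows).
# The helper below is only a sketch of that shared pattern for reference; it is not called
# anywhere, and the exact file paths / slicing of empty rows remain per-user as above.
def label_user_acceleration(acceleration_path, annotations_path):
    acc = pd.read_csv(acceleration_path)
    acc = pd.DataFrame(acc.iloc[:, 0:4].values)
    acc.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
    acc.insert(0, "Activity", "", True)
    # time difference between consecutive samples (first row is NaN)
    acc["Timedifference"] = acc["Timeframe"].diff()
    annotations = pd.read_csv(annotations_path)
    # assign the annotated activity to every sample whose timestamp falls inside the window
    for _, row in annotations.iterrows():
        start, end, activity = row.iloc[0], row.iloc[1], row.iloc[2]
        mask = (acc["Timeframe"] > start) & (acc["Timeframe"] < end)
        acc.loc[mask, "Activity"] = activity
    return acc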
#For User 6
User_6 = pd.read_csv('acceleration.csv')
User_6 = pd.DataFrame(User_6.iloc[:, 0:4].values)
User_6.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_6.insert(0, "Activity", "", True)
User_6_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_6.insert(5, "Timedifference", "", True)
User_6 = User_6.to_numpy()
for i in range(1, len(User_6)):
User_6[i][5] = User_6[i][1] - User_6[i-1][1]
User_6 = pd.DataFrame(User_6)
User_6.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_6 = User_6.to_numpy()
User_6_annotations = User_6_annotations.to_numpy()
for i in range(0, len(User_6_annotations)):
for j in range(0, len(User_6)):
if (User_6[j][1] > User_6_annotations[i][0]) and (User_6[j][1] < User_6_annotations[i][1]):
User_6[j][0] = User_6_annotations[i][2]
User_6 = pd.DataFrame(User_6)
User_6.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_6_annotations = | pd.DataFrame(User_6_annotations) | pandas.DataFrame |
"""
Full Pipeline: A-B-C
Development Pipeline: B-C
A) Making negative examples
B) Offering categories for redirect(Filter)
C) Predict probability for (query, category) (redirects)
"""
import pandas as pd
import re
from json import load
from utils.merge_tables import merge_product_external_id_to_categories
from utils.cousins import get_cousin_id
import random
random.seed(0)
def make_negative_examples_from_searches(json_path, path_to_data):
"""
Using file "420_searches.json"
    1) For each product, take its categories and find the most distant category
    2) For each product, store the product together with an unsuitable category
    3) Build a DataFrame
    :param json_path: list of dicts
    :param external_id_to_category: result of the merge_product_external_id_to_categories function
    :param category_tree: anytree.Node (the idea of using the tree is to take not a direct ancestor/descendant, but a "sibling"/"cousin"/"aunt-uncle" node)
:return: pd.DataFrame with columns = [query: str, category_id: int]
Example of dict:
{'query': '0 70',
'amount': 1,
'products': {
'89072600018': '1',
'89072600022': '1',
'89072600015': '42',}}
"""
with open(json_path) as ff:
data = load(ff)
products = pd.read_csv(path_to_data + '/products.csv')
products_categories = pd.read_csv(path_to_data + '/products_categories.csv')
def query_function(df):
query = []
product_external_id = []
for i in range(len(df)):
product_external_id.append(list(df[i]['products'].keys())[0])
query.append(df[i]['query'])
i+=1
data_query = | pd.DataFrame({'query': query, 'external_id': product_external_id}) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
| tm.assert_frame_equal(p - df, exp) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import os
import ast
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
class AffectiveMonitorDataset(Dataset):
"""
Affective Monitor Dataset in Pytorch version class
raw dataframe read from csv file is in .face_df
filepath (string):
Path to data directory
mode (string):
'FAC' is default mode to load vector of FAC unit
'RAW' load raw data which are 1347 points of facial points cloud
transform (callable,optional):
optional transform to be applied on a sample
fix_distance (bool):
will FAPU get adjusted by distance or not. Default is 'False'
subjects (list):
list of test subject number specified to be loaded
"""
def __init__(self,filepath,mode='FAC',transform=None,fix_distance=False,subjects=None,fix_PD=True,convert_label=True):
"""
Args:
filepath (string): Path to data directory
mode (string): 'FAC' is default mode to load vector of FAC unit
'RAW' load raw data which are 1347 points of facial points cloud
transform (callable,optional): optional transform to be applied on a sample
"""
# determine how many test subjects data will be loaded
if subjects:
self.subjects = subjects
else:
self.subjects = [i for i in range(1,2)]
# # removePLR or not
# self.removePLR = removePLR
# fix PD
self.fix_PD = fix_PD
# map pic index with arousal level and valence level
self.convert_label = convert_label
self.label_lookup = self.load_label(filepath)
self.fix_distance = fix_distance
# load global FAPU
self.global_fapu = self.load_FAPU(filepath)
# load samples from csv file
if mode == 'FAC':
self.samples = self.load_dataframe_FAC(filepath)
elif mode == 'RAW':
self.samples = self.load_dataframe_raw(filepath)
# option for data augmentation
self.transform = transform
def load_dataframe_FAC(self,path):
"""
        Read the CSV file and convert it to FAC units
"""
# create file path
filepaths = [os.path.join(path, "TestSubject"+str(i)+"\\FAP.txt") for i in self.subjects]
self.face_df_raw = pd.DataFrame()
# initialize Total dataframe
total = pd.DataFrame()
# Loop through each Testsubject folder
for i in range(len(filepaths)):
face_df = pd.read_csv(filepaths[i],header=1,delimiter=",",
quotechar=";",
# index_col="PicIndex",
skipinitialspace=True)
# set index column
face_df = face_df.set_index("PicIndex")
self.face_df_raw = self.face_df_raw.append(face_df)
# fill pupil diameter of the first row by the second row
face_df.iloc[0,face_df.columns.get_loc("PupilDiameter")] = face_df.iloc[1,face_df.columns.get_loc("PupilDiameter")]
# convert string to tuple on pupil diameter column
# right now the way we handle sample is to replace the non-int value by (0,0)
a_prev = (0,0)
for i in range(face_df.shape[0]):
# try convert string to tuple
try:
a = ast.literal_eval(face_df.iloc[i,face_df.columns.get_loc("PupilDiameter")])
# if self.fix_PD:
# # handling missing value
# if a[0] < 2.5:
# a[0] = a_prev[0]
# if a[1] < 2.5:
# a[1] = a_prev[1]
# face_df.iat[i,face_df.columns.get_loc("PupilDiameter")] = a
# # a_prev = a
# replace (0,0) if cannot convert
except:
a = a_prev
# replace tuple value to face_df
face_df.iat[i,face_df.columns.get_loc("PupilDiameter")] = a
# get list of pd_left and pd_right
pd_left, pd_right = self.tuple_to_list(face_df['PupilDiameter'])
# clean PD is self.fix_PD is true
if self.fix_PD:
# merge two eye sides signals together
pupil_merge = []
for x,y in zip(pd_left,pd_right):
if x > y:
pupil_merge.append(x)
else:
pupil_merge.append(y)
# determine missing value
# # find average (discard missing value)
# pd_sum = [0,0]
# count_left = 0
# count_right = 0
# for i in range(face_df.shape[0]):
# a = face_df.iloc[i]['PupilDiameter']
# if a[0] != 0:
# pd_sum[0] = pd_sum[0]+a[0]
# count_left += 1
# if a[1] != 0:
# pd_sum[1] = pd_sum[1]+a[1]
# count_right += 1
# pd_avg = (pd_sum[0]/count_left,pd_sum[1]/count_right)
#
# # Pad (0,0) with average value
# for i in range(face_df.shape[0]):
# a = face_df.iloc[i]['PupilDiameter']
# b = list(a)
# if b[0] == 0:
# b[0] = pd_avg[0]
# if b[1] == 0:
# b[1] = pd_avg[1]
# face_df.iat[i,face_df.columns.get_loc('PupilDiameter')] = b
# Remove PLR
illum = face_df['Illuminance'].values
depth = face_df['Depth']
pd_left, pd_right = self.tuple_to_list(face_df['PupilDiameter'])
filtered_pupil_left = self.remove_PLR(pd_left,illum,10,15)
filtered_pupil_right = self.remove_PLR(pd_right,illum,10,15)
pupil_left_to_merge = filtered_pupil_left
pupil_left_to_merge[:101] = pd_left[:101]
pupil_right_to_merge = filtered_pupil_right
pupil_right_to_merge[:101] = pd_right[:101]
else:
illum = face_df['Illuminance'].values
depth = face_df['Depth'].values
pd_left, pd_right = self.tuple_to_list(face_df['PupilDiameter'])
pupil_left_to_merge = pd_left
pupil_right_to_merge = pd_right
# pupil_avg_to_merge = [x+y for x,y in zip(pupil_left_to_merge,pupil_right_to_merge)]
# merge two eye sides signals together
pupil_comb_to_merge = []
for x,y in zip(pupil_left_to_merge,pupil_right_to_merge):
if x > y:
pupil_comb_to_merge.append(x)
else:
pupil_comb_to_merge.append(y)
# adjust FAPU if fix_distance is True, otherwise just go ahead and divide by the global FAPU
if self.fix_distance:
self.FAPUlize(face_df,self.global_fapu.iloc[0],adjust=True)
else:
# convert FAP in FAPU using global fapu
self.FAPUlize(face_df,fapu=self.global_fapu.iloc[0],adjust=False)
# create face sample loop through each picture index
# self.face_df = face_df
for i in range(1,max(face_df.index.values)+1):
# number of rows per sample
start = (i*100)-100# 0,100,200,...
end = (i*100) # 100,200,300,...
# group sequence of face point
face_per_picture = face_df.loc[i]
face_FAP_per_picture = face_per_picture.iloc[:,0:19]
face_FAP_in_sequence = []
for j in range(face_FAP_per_picture.shape[0]):
face_FAP_in_sequence.append(list(face_FAP_per_picture.iloc[j]))
# prepare pupil diameter
pupils = list(face_per_picture.loc[:,"PupilDiameter"])
# prepare illuminance
illuminance = list(face_per_picture.loc[:,"Illuminance"])
# create one sample
sample = {'faceFAP': face_FAP_in_sequence,
'PD': pupils,
'PD_left_filtered': pupil_left_to_merge[start:end],
'PD_right_filtered': pupil_right_to_merge[start:end],
'PD_avg_filtered': pupil_comb_to_merge[start:end],
'illuminance': illuminance,
'depth': depth[start:end],
'arousal': self.label_lookup.loc[i,'Arousal_target'],
'valence': self.label_lookup.loc[i,'Valence_target'] }
# append prepared sample to total dataframe
total = total.append(sample, ignore_index=True)
return total
def load_dataframe_raw(self,path):
"""
        Read the CSV file and return all the facial points obtained
"""
# create file path
filepaths = [os.path.join(path, "TestSubject"+str(i)+"\\Data.txt") for i in self.subjects]
# initialize Total dataframe
total = pd.DataFrame()
# Loop through each Testsubject folder
for filepath in filepaths:
face_df = pd.read_csv(filepath,header=2,
delimiter=",",
quotechar=";",
index_col="PicIndex",
skipinitialspace=True)
# convert string to tuple
for i in range(0,1347):
face_df.iloc[:,i] = pd.Series([ast.literal_eval(x) for x in face_df.iloc[:,i]])
# create face sample loop through each picture index
for i in range(1,4):
# group sequence of face point
face_per_picture = face_df.loc[i]
face_points_per_picture = face_per_picture.iloc[:,0:1347]
face_points_in_sequence = []
for j in range(face_points_per_picture.shape[0]):
face_points_in_sequence.append(list(face_points_per_picture.iloc[j]))
# prepare pupil diameter
pupils = list(face_per_picture.loc[:,"PupilDiameter"])
# prepare illuminance
illuminance = list(face_per_picture.loc[:,"Illuminance"])
# create one sample
sample = {'facepoints': face_points_in_sequence,
'PD': pupils,
'illuminance': illuminance,
'arousal': self.label_lookup.loc[i,'Arousal_target'],
'valence': self.label_lookup.loc[i,'Valence_target'] }
# append prepared sample to total dataframe
total = total.append(sample, ignore_index=True)
return total
def load_label(self,path):
filepath_label = os.path.join(path, "TestSubject1\\SAMrating.txt")
SAM_df = pd.read_csv(filepath_label,header=1,index_col="PictureIndex")
# define function to convert the raw SAM to our 5 labels
def convert_to_label(SAM):
scale = 1
target_scale = scale*((SAM-5)/4)
if -1.0 <= target_scale < -0.6:
target_scale = 1
elif -0.6 <= target_scale < -0.2:
target_scale = 2
elif -0.2 <= target_scale < 0.2:
target_scale = 3
elif 0.2 <= target_scale < 0.6:
target_scale = 4
elif 0.6 <= target_scale <= 1:
target_scale = 5
return target_scale
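        # e.g. a mean SAM rating of 7 maps to (7 - 5) / 4 = 0.5, which falls in [0.2, 0.6) -> label 4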
if self.convert_label:
# Apply function convert_to_label to Arousal and Valence Columns
SAM_df['Arousal_target'] = SAM_df['Arousal_mean(IAPS)'].apply(convert_to_label)
SAM_df['Valence_target'] = SAM_df['Valence_mean(IAPS)'].apply(convert_to_label)
else:
SAM_df['Arousal_target'] = SAM_df['Arousal_mean(IAPS)']
SAM_df['Valence_target'] = SAM_df['Valence_mean(IAPS)']
return SAM_df
def load_FAPU(self,path):
# create file path
filepaths_fapu = [os.path.join(path, "TestSubject"+str(i)+"\\TestSubjectInfo.txt") for i in self.subjects]
# loop through each test subject
subject_number = 0
# initialize Total dataframe
total = pd.DataFrame()
for filepath in filepaths_fapu:
subject_number += 1
FAPU_df = | pd.read_csv(filepath,header=6) | pandas.read_csv |
#############################################################
# ActivitySim verification against TM1
# <NAME>, <EMAIL>, 02/22/19
# C:\projects\activitysim\verification>python compare_results.py
#############################################################
import pandas as pd
import openmatrix as omx
#############################################################
# INPUTS
#############################################################
pipeline_filename = 'asim/pipeline.h5'
distance_matrix_filename = "asim/skims.omx"
asim_nmtf_alts_filename = "asim/non_mandatory_tour_frequency_alternatives.csv"
process_sp = True # False skip work/sch shadow pricing comparisons, True do them
process_tm1 = True # False only processes asim, True processes tm1 as well
asim_sp_work_filename = "asim/shadow_price_workplace_modeled_size_10.csv"
asim_sp_school_filename = "asim/shadow_price_school_modeled_size_10.csv"
asim_sp_school_no_sp_filename = "asim/shadow_price_school_modeled_size_1.csv"
tm1_access_filename = "tm1/accessibility.csv"
tm1_sp_filename = "tm1/ShadowPricing_9.csv"
tm1_work_filename = "tm1/wsLocResults_1.csv"
tm1_ao_filename = "tm1/aoResults.csv"
tm1_hh_filename = "tm1/householdData_1.csv"
tm1_cdap_filename = "tm1/cdapResults.csv"
tm1_per_filename = "tm1/personData_1.csv"
tm1_tour_filename = "tm1/indivTourData_1.csv"
tm1_jtour_filename = "tm1/jointTourData_1.csv"
tm1_trips_filename = "tm1/indivTripData_1.csv"
tm1_jtrips_filename = "tm1/jointTripData_1.csv"
#############################################################
# OUTPUT FILES FOR DEBUGGING
#############################################################
asim_zones_filename = "asim/asim_zones.csv"
asim_access_filename = "asim/asim_access.csv"
asim_per_filename = "asim/asim_per.csv"
asim_hh_filename = "asim/asim_hh.csv"
asim_tour_filename = "asim/asim_tours.csv"
asim_trips_filename = "asim/asim_trips.csv"
#############################################################
# COMMON LABELS
#############################################################
ptypes = ["", "Full-time worker", "Part-time worker", "University student", "Non-worker",
"Retired", "Student of driving age", "Student of non-driving age",
"Child too young for school"]
mode_labels = ["", "DRIVEALONEFREE", "DRIVEALONEPAY", "SHARED2FREE", "SHARED2PAY", "SHARED3FREE",
"SHARED3PAY", "WALK", "BIKE", "WALK_LOC", "WALK_LRF", "WALK_EXP", "WALK_HVY",
"WALK_COM", "DRIVE_LOC", "DRIVE_LRF", "DRIVE_EXP", "DRIVE_HVY", "DRIVE_COM"]
#############################################################
# DISTANCE SKIM
#############################################################
# read distance matrix (DIST)
distmat = omx.open_file(distance_matrix_filename)["DIST"][:]
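# Note for the lookups further down: TAZ ids in the model outputs are 1-based while the OMX
# matrix is 0-based, hence the "- 1" offsets. A minimal sketch (zone numbers are hypothetical,
# units are whatever the DIST skim stores):
#   dist_12_to_34 = distmat[12 - 1, 34 - 1]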
#############################################################
# EXPORT TABLES
#############################################################
# write tables for verification
tazs = pd.read_hdf(pipeline_filename, "land_use/initialize_landuse")
tazs["zone"] = tazs.index
tazs.to_csv(asim_zones_filename, index=False)
access = pd.read_hdf(pipeline_filename, "accessibility/compute_accessibility")
access.to_csv(asim_access_filename, index=False)
hh = pd.read_hdf(pipeline_filename, "households/joint_tour_frequency")
hh["household_id"] = hh.index
hh.to_csv(asim_hh_filename, index=False)
per = pd.read_hdf(pipeline_filename, "persons/non_mandatory_tour_frequency")
per["person_id"] = per.index
per.to_csv(asim_per_filename, index=False)
tours = pd.read_hdf(pipeline_filename, "tours/stop_frequency")
tours["tour_id"] = tours.index
tours.to_csv(asim_tour_filename, index=False)
trips = pd.read_hdf(pipeline_filename, "trips/trip_mode_choice")
trips["trip_id"] = trips.index
trips.to_csv(asim_trips_filename, index=False)
#############################################################
# AGGREGATE
#############################################################
# accessibilities
if process_tm1:
tm1_access = pd.read_csv(tm1_access_filename)
tm1_access.to_csv("outputs/tm1_access.csv", na_rep=0)
asim_access = pd.read_csv(asim_access_filename)
asim_access.to_csv("outputs/asim_access.csv", na_rep=0)
#############################################################
# HOUSEHOLD AND PERSON
#############################################################
# work and school location
if process_sp:
if process_tm1:
tm1_markets = ["work_low", "work_med", "work_high", "work_high", "work_very high", "university",
"school_high", "school_grade"]
tm1 = pd.read_csv(tm1_sp_filename)
tm1 = tm1.groupby(tm1["zone"]).sum()
tm1["zone"] = tm1.index
tm1 = tm1.loc[tm1["zone"] > 0]
ws_size = tm1[["zone"]]
for i in range(len(tm1_markets)):
ws_size[tm1_markets[i] + "_modeledDests"] = tm1[tm1_markets[i] + "_modeledDests"]
ws_size.to_csv("outputs/tm1_work_school_location.csv", na_rep=0)
asim_markets = ["work_low", "work_med", "work_high", "work_high", "work_veryhigh", "university",
"highschool", "gradeschool"]
asim = pd.read_csv(asim_sp_work_filename)
asim_sch = pd.read_csv(asim_sp_school_filename)
asim_sch_no_sp = pd.read_csv(asim_sp_school_no_sp_filename)
asim_sch["gradeschool"] = asim_sch_no_sp["gradeschool"] # grade school not shadow priced
asim = asim.set_index("TAZ", drop=False)
asim_sch = asim_sch.set_index("TAZ", drop=False)
asim["gradeschool"] = asim_sch["gradeschool"].loc[asim["TAZ"]].tolist()
asim["highschool"] = asim_sch["highschool"].loc[asim["TAZ"]].tolist()
asim["university"] = asim_sch["university"].loc[asim["TAZ"]].tolist()
ws_size = asim[["TAZ"]]
for i in range(len(asim_markets)):
ws_size[asim_markets[i] + "_asim"] = asim[asim_markets[i]]
ws_size.to_csv("outputs/asim_work_school_location.csv", na_rep=0)
# work county to county flows
tazs = pd.read_csv(asim_zones_filename)
counties = ["", "SF", "SM", "SC", "ALA", "CC", "SOL", "NAP", "SON", "MAR"]
tazs["COUNTYNAME"] = pd.Series(counties)[tazs["county_id"].tolist()].tolist()
tazs = tazs.set_index("zone", drop=False)
if process_tm1:
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work["HomeCounty"] = tazs["COUNTYNAME"].loc[tm1_work["HomeTAZ"]].tolist()
tm1_work["WorkCounty"] = tazs["COUNTYNAME"].loc[tm1_work["WorkLocation"]].tolist()
tm1_work_counties = tm1_work.groupby(["HomeCounty", "WorkCounty"]).count()["HHID"]
tm1_work_counties = tm1_work_counties.reset_index()
tm1_work_counties = tm1_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
tm1_work_counties.to_csv("outputs/tm1_work_counties.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap["HomeCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["home_taz"]].tolist()
asim_cdap["WorkCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["workplace_zone_id"]].tolist()
asim_work_counties = asim_cdap.groupby(["HomeCounty", "WorkCounty"]).count()["household_id"]
asim_work_counties = asim_work_counties.reset_index()
asim_work_counties = asim_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
asim_work_counties.to_csv("outputs/asim_work_counties.csv", na_rep=0)
# auto ownership - count of hhs by num autos by taz
if process_tm1:
tm1_ao = pd.read_csv(tm1_ao_filename)
tm1_hh = pd.read_csv(tm1_hh_filename)
tm1_ao = tm1_ao.set_index("HHID", drop=False)
tm1_hh["ao"] = tm1_ao["AO"].loc[tm1_hh["hh_id"]].tolist()
tm1_autos = tm1_hh.groupby(["taz", "ao"]).count()["hh_id"]
tm1_autos = tm1_autos.reset_index()
tm1_autos = tm1_autos.pivot(index="taz", columns="ao")
tm1_autos.to_csv("outputs/tm1_autos.csv", na_rep=0)
asim_ao = pd.read_csv(asim_hh_filename)
asim_autos = asim_ao.groupby(["TAZ", "auto_ownership"]).count()["SERIALNO"]
asim_autos = asim_autos.reset_index()
asim_autos = asim_autos.pivot(index="TAZ", columns="auto_ownership")
asim_autos.to_csv("outputs/asim_autos.csv", na_rep=0)
# cdap - ptype count and ptype by M,N,H
if process_tm1:
tm1_cdap = pd.read_csv(tm1_cdap_filename)
tm1_cdap_sum = tm1_cdap.groupby(["PersonType", "ActivityString"]).count()["HHID"]
tm1_cdap_sum = tm1_cdap_sum.reset_index()
tm1_cdap_sum = tm1_cdap_sum.pivot(index="PersonType", columns="ActivityString")
tm1_cdap_sum.to_csv("outputs/tm1_cdap.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap_sum = asim_cdap.groupby(["ptype", "cdap_activity"]).count()["household_id"]
asim_cdap_sum = asim_cdap_sum.reset_index()
asim_cdap_sum = asim_cdap_sum.pivot(index="ptype", columns="cdap_activity")
asim_cdap_sum.to_csv("outputs/asim_cdap.csv", na_rep=0)
# free parking by ptype
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
tm1_per["fp_choice"] = (tm1_per["fp_choice"] == 1) # 1=free, 2==pay
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work = tm1_work.set_index("PersonID", drop=False)
tm1_per["WorkLocation"] = tm1_work["WorkLocation"].loc[tm1_per["person_id"]].tolist()
tm1_fp = tm1_per[tm1_per["WorkLocation"] > 0]
tm1_fp = tm1_fp.groupby(["type", "fp_choice"]).count()["hh_id"]
tm1_fp = tm1_fp.reset_index()
tm1_fp = tm1_fp.pivot(index="type", columns="fp_choice")
tm1_fp.to_csv("outputs/tm1_fp.csv", na_rep=0)
asim_cdap["ptypename"] = pd.Series(ptypes)[asim_cdap["ptype"].tolist()].tolist()
asim_fp = asim_cdap.groupby(["ptypename", "free_parking_at_work"]).count()["household_id"]
asim_fp = asim_fp.reset_index()
asim_fp = asim_fp.pivot(index="ptypename", columns="free_parking_at_work")
asim_fp.to_csv("outputs/asim_fp.csv", na_rep=0)
# value of time
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
tm1_per["vot_bin"] = pd.cut(tm1_per["value_of_time"], range(51))
tm1_per.groupby(["vot_bin"]).count()["hh_id"].to_csv("outputs/tm1_vot.csv", na_rep=0)
asim_per = pd.read_csv(asim_per_filename)
asim_per["vot_bin"] = pd.cut(asim_per["value_of_time"], range(51))
asim_per.groupby(["vot_bin"]).count()["household_id"].to_csv("outputs/asim_vot.csv", na_rep=0)
#############################################################
# TOUR
#############################################################
# indiv mandatory tour freq
tm1_imf_codes = ["", "0", "work1", "work2", "school1", "school2", "work_and_school"]
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
tm1_hh = pd.read_csv(tm1_hh_filename)
tm1_hh = tm1_hh.set_index("hh_id", drop=False)
tm1_per["hhsize"] = tm1_hh["size"].loc[tm1_per["hh_id"]].tolist()
# indexing starts at 1
tm1_per["imf_choice_name"] = pd.Series(tm1_imf_codes)[(tm1_per["imf_choice"]+1).tolist()].tolist()
tm1_imf = tm1_per.groupby(["type", "imf_choice_name"]).count()["hh_id"]
tm1_imf = tm1_imf.reset_index()
tm1_imf = tm1_imf.pivot(index="type", columns="imf_choice_name")
tm1_imf.to_csv("outputs/tm1_imtf.csv", na_rep=0)
asim_ao = asim_ao.set_index("household_id", drop=False)
asim_cdap["hhsize"] = asim_ao["hhsize"].loc[asim_cdap["household_id"]].tolist()
asim_cdap["ptypename"] = pd.Series(ptypes)[asim_cdap["ptype"].tolist()].tolist()
asim_imf = pd.read_csv(asim_per_filename)
asim_imf["ptypename"] = pd.Series(ptypes)[asim_imf["ptype"].tolist()].tolist()
asim_imf["mandatory_tour_frequency"] = pd.Categorical(asim_imf["mandatory_tour_frequency"],
categories=tm1_imf_codes)
asim_imf["mandatory_tour_frequency"][asim_imf["mandatory_tour_frequency"].isnull()] = "0"
asim_imf = asim_imf.groupby(["ptypename", "mandatory_tour_frequency"]).count()["household_id"]
asim_imf = asim_imf.reset_index()
asim_imf = asim_imf.pivot(index="ptypename", columns="mandatory_tour_frequency")
asim_imf.to_csv("outputs/asim_imtf.csv", na_rep=0)
# indiv mand tour departure and duration
if process_tm1:
tm1_tours = pd.read_csv(tm1_tour_filename)
tm1_tours = tm1_tours[tm1_tours["tour_category"] == "MANDATORY"]
tm1_tours["tour_purpose"][tm1_tours["tour_purpose"].str.contains("work")] = "work"
tm1_tours["tour_purpose"][tm1_tours["tour_purpose"].str.contains("s")] = "school"
tm1_mtdd = tm1_tours.groupby(["start_hour", "end_hour", "tour_purpose"]).count()["hh_id"]
tm1_mtdd = tm1_mtdd.reset_index()
tm1_mtdd_sch = tm1_mtdd[tm1_mtdd["tour_purpose"] == "school"][[
"start_hour", "end_hour", "hh_id"]].pivot(index="start_hour", columns="end_hour")
tm1_mtdd_work = tm1_mtdd[tm1_mtdd["tour_purpose"] == "work"][[
"start_hour", "end_hour", "hh_id"]].pivot(index="start_hour", columns="end_hour")
tm1_mtdd_sch.to_csv("outputs/tm1_mtdd_school.csv", na_rep=0)
tm1_mtdd_work.to_csv("outputs/tm1_mtdd_work.csv", na_rep=0)
asim_tours = pd.read_csv(asim_tour_filename)
asim_tours_man = asim_tours[asim_tours["tour_category"] == "mandatory"]
asim_mtdd = asim_tours_man.groupby(["start", "end", "tour_type"]).count()["household_id"]
asim_mtdd = asim_mtdd.reset_index()
asim_mtdd_sch = asim_mtdd[asim_mtdd["tour_type"] == "school"][[
"start", "end", "household_id"]].pivot(index="start", columns="end")
asim_mtdd_work = asim_mtdd[asim_mtdd["tour_type"] == "work"][[
"start", "end", "household_id"]].pivot(index="start", columns="end")
asim_mtdd_sch.to_csv("outputs/asim_mtdd_school.csv", na_rep=0)
asim_mtdd_work.to_csv("outputs/asim_mtdd_work.csv", na_rep=0)
# joint tour frequency
jtf_labels = ["", "0_tours", "1_Shop", "1_Main", "1_Eat", "1_Visit", "1_Disc",
"2_SS", "2_SM", "2_SE", "2_SV", "2_SD", "2_MM", "2_ME", "2_MV", "2_MD", "2_EE",
"2_EV", "2_ED", "2_VV", "2_VD", "2_DD"]
if process_tm1:
tm1_jtf = tm1_hh
tm1_jtf = tm1_jtf[tm1_jtf["jtf_choice"] > 0]
tm1_jtf["jtf_choice_label"] = pd.Series(jtf_labels)[tm1_jtf["jtf_choice"].tolist()].tolist()
tm1_jtf.groupby("jtf_choice_label").count()["hh_id"].to_csv("outputs/tm1_jtf.csv", na_rep=0)
asim_jtf = pd.read_csv(asim_hh_filename)
asim_jtf = asim_jtf[asim_jtf["joint_tour_frequency"] != ""]
asim_jtf.groupby("joint_tour_frequency").count()["household_id"].to_csv("outputs/asim_jtf.csv", na_rep=0)
# joint tour comp
if process_tm1:
tm1_jtours = pd.read_csv(tm1_jtour_filename)
comp_labels = ["", "adult", "children", "mixed"]
tm1_jtours["tour_composition_labels"] = pd.Series(comp_labels)[
tm1_jtours["tour_composition"].tolist()].tolist()
tm1_jtour_comp = tm1_jtours.groupby(["tour_purpose", "tour_composition_labels"]).count()["hh_id"]
tm1_jtour_comp = tm1_jtour_comp.reset_index()
tm1_jtour_comp = tm1_jtour_comp.pivot(index="tour_purpose", columns="tour_composition_labels")
tm1_jtour_comp.to_csv("outputs/tm1_jtour_comp.csv", na_rep=0)
asim_jtours = pd.read_csv(asim_tour_filename)
asim_jtours = asim_jtours[asim_jtours["tour_category"] == "joint"]
asim_jtour_comp = asim_jtours.groupby(["tour_type", "composition"]).count()["household_id"]
asim_jtour_comp = asim_jtour_comp.reset_index()
asim_jtour_comp = asim_jtour_comp.pivot(index="tour_type", columns="composition")
asim_jtour_comp.to_csv("outputs/asim_jtour_comp.csv", na_rep=0)
# joint tour destination
if process_tm1:
tm1_jtours["distance"] = distmat[tm1_jtours["orig_taz"]-1, tm1_jtours["dest_taz"]-1]
tm1_jtours["dist_bin"] = pd.cut(tm1_jtours["distance"], range(51))
tm1_jtours.groupby(["dist_bin"]).count()["hh_id"].to_csv("outputs/tm1_jtour_dist.csv", na_rep=0)
asim_jtours["distance"] = distmat[asim_jtours["origin"].astype(int)-1,
asim_jtours["destination"].astype(int)-1]
asim_jtours["dist_bin"] = pd.cut(asim_jtours["distance"], range(51))
asim_jtours.groupby(["dist_bin"]).count()["household_id"].to_csv("outputs/asim_jtour_dist.csv", na_rep=0)
# joint tour tdd
if process_tm1:
tm1_jtours_tdd = tm1_jtours.groupby(["start_hour", "end_hour"]).count()["hh_id"]
tm1_jtours_tdd = tm1_jtours_tdd.reset_index()
tm1_jtours_tdd = tm1_jtours_tdd.pivot(index="start_hour", columns="end_hour")
tm1_jtours_tdd.to_csv("outputs/tm1_jtours_tdd.csv", na_rep=0)
asim_jtours_tdd = asim_jtours.groupby(["start", "end"]).count()["household_id"]
asim_jtours_tdd = asim_jtours_tdd.reset_index()
asim_jtours_tdd = asim_jtours_tdd.pivot(index="start", columns="end")
asim_jtours_tdd.to_csv("outputs/asim_jtours_tdd.csv", na_rep=0)
# non-mand tour freq
alts = pd.read_csv(asim_nmtf_alts_filename)
alts["ID"] = range(len(alts))
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
# 0 doesn't participate in choice model therefore 0 tours, and -1 to align with asim
tm1_per["inmf_choice"][tm1_per["inmf_choice"] == 0] = 1
tm1_per["inmf_choice"] = tm1_per["inmf_choice"] - 1
tm1_nmtf_sum = tm1_per.groupby(["inmf_choice"]).count()["hh_id"]
tm1_alts = pd.concat([alts, tm1_nmtf_sum], axis=1)
tm1_alts.to_csv("outputs/tm1_nmtf.csv", na_rep=0)
asim_per_nmtf = pd.read_csv(asim_per_filename)
asim_per_nmtf["ptypename"] = pd.Series(ptypes)[asim_per_nmtf["ptype"].tolist()].tolist()
asim_nmtf_sum = asim_per_nmtf.groupby(["non_mandatory_tour_frequency"]).count()["household_id"]
asim_alts = pd.concat([alts, asim_nmtf_sum], axis=1)
asim_alts.to_csv("outputs/asim_nmtf.csv", na_rep=0)
# non_mandatory_tour_destination
if process_tm1:
tm1_tours = pd.read_csv(tm1_tour_filename)
tm1_tours["distance"] = distmat[tm1_tours["orig_taz"]-1, tm1_tours["dest_taz"]-1]
tm1_tours["dist_bin"] = pd.cut(tm1_tours["distance"], range(51))
tm1_tours_nm = tm1_tours[tm1_tours["tour_category"] == "INDIVIDUAL_NON_MANDATORY"]
tm1_tours_nm.groupby(["dist_bin"]).count()["hh_id"].to_csv("outputs/tm1_nmtd_dist.csv", na_rep=0)
asim_nm_tours = pd.read_csv(asim_tour_filename)
asim_nm_tours = asim_nm_tours[asim_nm_tours["tour_category"] == "non_mandatory"]
asim_nm_tours["distance"] = distmat[asim_nm_tours["origin"].astype(int)-1,
asim_nm_tours["destination"].astype(int)-1]
asim_nm_tours["dist_bin"] = pd.cut(asim_nm_tours["distance"], range(51))
asim_nm_tours.groupby(["dist_bin"]).count()["household_id"].to_csv("outputs/asim_nmtd_dist.csv", na_rep=0)
# non_mandatory_tour_scheduling
if process_tm1:
tm1_nmtours_tdd = tm1_tours_nm.groupby(["start_hour", "end_hour"]).count()["hh_id"]
tm1_nmtours_tdd = tm1_nmtours_tdd.reset_index()
tm1_nmtours_tdd = tm1_nmtours_tdd.pivot(index="start_hour", columns="end_hour")
tm1_nmtours_tdd.to_csv("outputs/tm1_nmtours_tdd.csv", na_rep=0)
asim_nmtours_tdd = asim_nm_tours.groupby(["start", "end"]).count()["household_id"]
asim_nmtours_tdd = asim_nmtours_tdd.reset_index()
asim_nmtours_tdd = asim_nmtours_tdd.pivot(index="start", columns="end")
asim_nmtours_tdd.to_csv("outputs/asim_nmtours_tdd.csv", na_rep=0)
# tour mode choice
if process_tm1:
tm1_tours = pd.read_csv(tm1_tour_filename)
tm1_jtours = pd.read_csv(tm1_jtour_filename)
tm1_tours["tour_mode_labels"] = pd.Series(mode_labels)[tm1_tours["tour_mode"].tolist()].tolist()
tm1_tours["tour_mode_labels"] = pd.Categorical(tm1_tours["tour_mode_labels"],
categories=mode_labels)
tm1_jtours["tour_mode_labels"] = pd.Series(mode_labels)[tm1_jtours["tour_mode"].tolist()].tolist()
tm1_jtours["tour_mode_labels"] = pd.Categorical(tm1_jtours["tour_mode_labels"],
categories=mode_labels)
tm1_nmn_tour_mode = tm1_tours.groupby(["tour_mode_labels", "tour_category"]).count()["hh_id"]
tm1_nmn_tour_mode = tm1_nmn_tour_mode.reset_index()
tm1_nmn_tour_mode = tm1_nmn_tour_mode.pivot(index="tour_mode_labels", columns="tour_category")
tm1_jtour_mode = tm1_jtours.groupby(["tour_mode_labels", "tour_category"]).count()["hh_id"]
tm1_jtour_mode = tm1_jtour_mode.reset_index()
tm1_jtour_mode = tm1_jtour_mode.pivot(index="tour_mode_labels", columns="tour_category")
tm1_tour_mode = pd.concat([tm1_nmn_tour_mode, tm1_jtour_mode], axis=1)
tm1_tour_mode.columns = ["atwork", "non_mandatory", "mandatory", "joint"]
tm1_tour_mode = tm1_tour_mode[["atwork", "joint", "mandatory", "non_mandatory"]]
tm1_tour_mode.to_csv("outputs/tm1_tour_mode.csv", na_rep=0)
asim_tours = pd.read_csv(asim_tour_filename)
asim_tours["tour_mode"] = pd.Categorical(asim_tours["tour_mode"], categories=mode_labels)
asim_tour_mode = asim_tours.groupby(["tour_mode", "tour_category"]).count()["household_id"]
asim_tour_mode = asim_tour_mode.reset_index()
asim_tour_mode = asim_tour_mode.pivot(index="tour_mode", columns="tour_category")
asim_tour_mode.to_csv("outputs/asim_tour_mode.csv", na_rep=0)
# atwork_subtour_frequency
if process_tm1:
tm1_work_tours = tm1_tours[tm1_tours["tour_purpose"].str.startswith("work")]
tm1_atwork_freq_strs = ["", "no_subtours", "eat", "business1",
"maint", "business2", "eat_business"]
tm1_work_tours["atWork_freq_str"] = pd.Series(tm1_atwork_freq_strs)[
tm1_work_tours["atWork_freq"].tolist()].tolist()
tm1_work_tours.groupby(["atWork_freq_str"]).count()["hh_id"].to_csv("outputs/tm1_atwork_tf.csv", na_rep=0)
asim_work_tours = asim_tours[asim_tours["primary_purpose"] == "work"]
asim_work_tours.groupby(["atwork_subtour_frequency"]).count()["household_id"].to_csv("outputs/asim_atwork_tf.csv",
na_rep=0)
# atwork_subtour_destination
if process_tm1:
tm1_tours = pd.read_csv(tm1_tour_filename)
tm1_tours["distance"] = distmat[tm1_tours["orig_taz"]-1, tm1_tours["dest_taz"]-1]
tm1_tours_atw = tm1_tours[tm1_tours["tour_category"] == "AT_WORK"]
tm1_tours_atw["dist_bin"] = pd.cut(tm1_tours_atw["distance"], range(51))
tm1_tours_atw.groupby(["dist_bin"]).count()["hh_id"].to_csv("outputs/tm1_atwork_dist.csv", na_rep=0)
asim_atw_tours = pd.read_csv(asim_tour_filename)
asim_atw_tours = asim_atw_tours[asim_atw_tours["tour_category"] == "atwork"]
asim_atw_tours["distance"] = distmat[asim_atw_tours["origin"].astype(int)-1,
asim_atw_tours["destination"].astype(int)-1]
asim_atw_tours["dist_bin"] = pd.cut(asim_atw_tours["distance"], range(51))
asim_atw_tours.groupby(["dist_bin"]).count()["household_id"].to_csv("outputs/asim_atwork_dist.csv", na_rep=0)
# atwork_subtour_scheduling
if process_tm1:
tm1_tours_atw_tdd = tm1_tours_atw.groupby(["start_hour", "end_hour"]).count()["hh_id"]
tm1_tours_atw_tdd = tm1_tours_atw_tdd.reset_index()
tm1_tours_atw_tdd = tm1_tours_atw_tdd.pivot(index="start_hour", columns="end_hour")
tm1_tours_atw_tdd.to_csv("outputs/tm1_atwork_tours_tdd.csv", na_rep=0)
asim_atw_tours_tdd = asim_atw_tours.groupby(["start", "end"]).count()["household_id"]
asim_atw_tours_tdd = asim_atw_tours_tdd.reset_index()
asim_atw_tours_tdd = asim_atw_tours_tdd.pivot(index="start", columns="end")
asim_atw_tours_tdd.to_csv("outputs/asim_atwork_tours_tdd.csv", na_rep=0)
# atwork_subtour_mode_choice - see tour mode above
# tour stop frequency
if process_tm1:
tm1_tours = pd.read_csv(tm1_tour_filename)
tm1_jtours = pd.read_csv(tm1_jtour_filename)
tm1_tours["tour_purpose_simple"] = tm1_tours["tour_purpose"]
tm1_tours["tour_purpose_simple"] = tm1_tours["tour_purpose_simple"].str.replace("atwork_", "")
tm1_tours["tour_purpose_simple"][tm1_tours["tour_purpose_simple"].
str.contains("work_")] = "work"
tm1_tours["tour_purpose_simple"][tm1_tours["tour_purpose_simple"].
str.contains("school_")] = "school"
tm1_tours["tour_purpose_simple"][tm1_tours["tour_purpose_simple"].
str.contains("university")] = "school"
tm1_tours["tour_purpose_simple"][tm1_tours["tour_purpose_simple"].
str.contains("escort_")] = "escort"
tm1_tours_atw = tm1_tours[tm1_tours["tour_category"] == "AT_WORK"]
tm1_tours_nmn = tm1_tours[tm1_tours["tour_category"] != "AT_WORK"]
tm1_tours_nmn["tsf"] = tm1_tours_nmn[
"num_ob_stops"].astype(str) + "-" + tm1_tours_nmn["num_ib_stops"].astype(str)
tm1_stop_freq = tm1_tours_nmn.groupby(["tsf", "tour_purpose_simple"]).count()["hh_id"]
tm1_stop_freq = tm1_stop_freq.reset_index()
tm1_stop_freq = tm1_stop_freq.pivot(index="tsf", columns="tour_purpose_simple")
tm1_jtours["tsf"] = tm1_jtours[
"num_ob_stops"].astype(str) + "-" + tm1_jtours["num_ib_stops"].astype(str)
tm1_tours_atw["tsf"] = tm1_tours_atw[
"num_ob_stops"].astype(str) + "-" + tm1_tours_atw["num_ib_stops"].astype(str)
tm1_stop_freq_joint = tm1_jtours.groupby(["tsf"]).count()["hh_id"]
tm1_stop_freq_atwork = tm1_tours_atw.groupby(["tsf"]).count()["hh_id"]
tm1_stop_freq = pd.concat([tm1_stop_freq, tm1_stop_freq_joint, tm1_stop_freq_atwork], axis=1)
tm1_stop_freq.to_csv("outputs/tm1_stop_freq.csv", na_rep=0)
asim_tours = pd.read_csv(asim_tour_filename)
asim_nmn_tours = asim_tours[(asim_tours["tour_category"] == "mandatory") |
(asim_tours["tour_category"] == "non_mandatory")]
asim_joint_tours = asim_tours[asim_tours["tour_category"] == "joint"]
asim_atw_tours = asim_tours[asim_tours["tour_category"] == "atwork"]
asim_stop_freq = asim_nmn_tours.groupby(["stop_frequency", "tour_type"]).count()["household_id"]
asim_stop_freq = asim_stop_freq.reset_index()
asim_stop_freq = asim_stop_freq.pivot(index="stop_frequency", columns="tour_type")
asim_stop_freq_joint = asim_joint_tours.groupby(["stop_frequency"]).count()["household_id"]
asim_stop_freq_atwork = asim_atw_tours.groupby(["stop_frequency"]).count()["household_id"]
asim_stop_freq = pd.concat([asim_stop_freq, asim_stop_freq_joint, asim_stop_freq_atwork], axis=1)
asim_stop_freq.to_csv("outputs/asim_stop_freq.csv", na_rep=0)
#############################################################
# TRIP
#############################################################
# trip purpose
if process_tm1:
tm1_trips = pd.read_csv(tm1_trips_filename)
tm1_jtrips = pd.read_csv(tm1_jtrips_filename)
tm1_trips["orig_purpose"][tm1_trips["orig_purpose"] == "university"] = "univ"
tm1_trips["orig_purpose"] = | pd.Categorical(tm1_trips["orig_purpose"]) | pandas.Categorical |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
| Timestamp("20130103 9:01:01") | pandas.Timestamp |
import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import train_test_split
class Risk(object):
def load_data(self, data_path):
"""
Load and preprocess the data
:param data_path: path to the source data file
:return: the processed data
"""
data = pd.read_csv(data_path)
data = data[['开始时间', '风险值', '地区名', '销售额']]
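# The next line keeps only the month component of the start-time string; e.g. with a
# "YYYY-MM-DD"-style value, '2019-05-12'.split('-')[1] -> '05' -> 5 (the exact date format
# in the CSV is an assumption here)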
data['开始时间'] = data['开始时间'].map(lambda t: int(t.split('-')[1]))
data['销售额'] = data['销售额'].astype(np.int32)
data = pd.get_dummies(data, columns=['地区名'])
# boxplot
# plt.boxplot(data['销售额'])
# plt.show()
return data
def training(self, data):
"""
Train and save the model
:param data: the already-processed data
:return: the feature matrix x
"""
y = data['风险值']
x = data.drop('风险值', axis=1)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
std = StandardScaler()
std.fit(X_train)
X_train_std = std.transform(X_train)
X_test_std = std.transform(X_test)
# ===========KNN==========
# for i in range(2, 20):
# knn_model = KNeighborsRegressor(n_neighbors=i)
# knn_model.fit(X_train_std, y_train)
# score = knn_model.score(X_test_std, y_test)
# print(score, i)
# ========= Random Forest =========
# for i in [1, 10, 100]:
# rgs_model = RandomForestRegressor(n_estimators=i)
# rgs_model.fit(X_train_std, y_train)
# score = rgs_model.score(X_test_std, y_test)
# print(score, i)
knn_model = KNeighborsRegressor(n_neighbors=12)
knn_model.fit(X_train_std, y_train)
pre_y_test = knn_model.predict(X_test_std)
joblib.dump(knn_model, '../model/risk.pkl')
score = knn_model.score(X_test_std, y_test)
print('Model score:', score)
print('MSE:', metrics.mean_squared_error(y_test, pre_y_test)) # mean squared error is non-negative; the closer to zero, the better the model
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, pre_y_test))) # root mean squared error; smaller is better
print('MAE:', metrics.mean_absolute_error(y_test, pre_y_test)) # mean absolute error
print('R2:', metrics.r2_score(y_test, pre_y_test)) # the closer R2 is to 1, the better the regression line fits the observations
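# For reference, the standard definitions behind the metrics printed above:
#   MSE  = mean((y - y_hat)**2)
#   RMSE = sqrt(MSE)
#   MAE  = mean(abs(y - y_hat))
#   R2   = 1 - sum((y - y_hat)**2) / sum((y - mean(y))**2)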
return x
def predict(self, data, input):
"""
Use the saved model to make predictions
:param data:
:param input:
:return:
"""
std = StandardScaler()
std.fit(data)
input_std = std.transform(input)
model = joblib.load('../model/risk.pkl')
return model.predict(input_std)
# Clustering
def save_type(self, df):
kmeans = KMeans(3, init='k-means++')
kmeans.fit(df)
# print('kmeansPredicter labels:', np.unique(kmeansPredicter.labels_))
df_res = pd.DataFrame(kmeans.cluster_centers_.flatten())
sort_res = df_res.sort_values(by=0)
sort_res.T.to_csv('../type/risk_type.csv', header=None, index=None, mode='w')
# Map a risk value to its label
def get_map(self, num):
arr = pd.read_csv('../type/risk_type.csv', nrows=1, header=None)
arr = arr.values[0]
if num <= arr[0]:
return {'risk': num, 'label': 'low'}
elif num > arr[0] and num <= arr[1]:
return {'risk': num, 'label': 'normal'}
elif num > arr[1]:
return {'risk': num, 'label': 'high'}
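# Example (threshold values are hypothetical): if risk_type.csv holds the sorted cluster
# centers 0.2 and 0.5, then get_map(0.1) -> {'risk': 0.1, 'label': 'low'},
# get_map(0.3) -> {'risk': 0.3, 'label': 'normal'}, get_map(0.8) -> {'risk': 0.8, 'label': 'high'}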
# Plotting
def graph(self, origin, predict):
"""
Plot original vs. predicted values
:param origin: original data
:param predict: prediction results
:return:
"""
plt.rcParams['font.family'] = 'simhei'
plt.plot(origin, label='original')
plt.plot(predict, label='predicted')
plt.ylabel('risk')
plt.legend()
plt.show()
if __name__ == '__main__':
r = Risk()
data_path = '../data/per_month_sale_and_risk.csv'
data = r.load_data(data_path)
# Train the model
x = r.training(data)
# Call the model to make predictions
res = r.predict(x, x[:10])
print('predict:', res)
# Clustering
r.save_type(pd.DataFrame(data['风险值']))
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 19:59:22 2020
@author: Dell
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sicData = pd.read_excel(r'C:\Users\Dell\Desktop\data\SIC(2011-2018).xlsx', parse_dates=[0])
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 18:29:16 2020
@author: POI-PC
"""
from PyQt5.QtWidgets import*
from PyQt5.QtCore import pyqtSlot
from PyQt5 import QtGui
from PyQt5 import QtCore, QtWidgets
import sys
from selenium import webdriver
import time
import pandas as pd
import numpy as np
from xlrd import open_workbook
import os
from openpyxl import *
import io
from zipfile import ZipFile
import xlrd
import codecs
import shutil
from selenium.common.exceptions import NoSuchElementException
import html5lib
from os import path
from pathlib import Path
from itertools import product
import xlwings as xw
from datetime import date
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1489, 901)
font = QtGui.QFont()
font.setPointSize(9)
MainWindow.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/bilanco.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
MainWindow.setIconSize(QtCore.QSize(50, 50))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(380, 10, 711, 51))
font = QtGui.QFont()
font.setFamily("Tw Cen MT")
font.setPointSize(24)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.sirketGetir = QtWidgets.QPushButton(self.centralwidget)
self.sirketGetir.setGeometry(QtCore.QRect(760, 120, 241, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
font.setStrikeOut(False)
self.sirketGetir.setFont(font)
self.sirketGetir.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.sirketGetir.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("icons/sirketler.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.sirketGetir.setIcon(icon1)
self.sirketGetir.setIconSize(QtCore.QSize(50, 50))
self.sirketGetir.setObjectName("sirketGetir")
self.yedekleSil = QtWidgets.QPushButton(self.centralwidget)
self.yedekleSil.setGeometry(QtCore.QRect(50, 120, 241, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.yedekleSil.setFont(font)
self.yedekleSil.setLayoutDirection(QtCore.Qt.LeftToRight)
self.yedekleSil.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("icons/clear.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.yedekleSil.setIcon(icon2)
self.yedekleSil.setIconSize(QtCore.QSize(50, 50))
self.yedekleSil.setObjectName("yedekleSil")
self.anaExcel = QtWidgets.QPushButton(self.centralwidget)
self.anaExcel.setGeometry(QtCore.QRect(1080, 120, 251, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.anaExcel.setFont(font)
self.anaExcel.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("icons/excel2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.anaExcel.setIcon(icon3)
self.anaExcel.setIconSize(QtCore.QSize(50, 50))
self.anaExcel.setObjectName("anaExcel")
self.sirketler = QtWidgets.QListWidget(self.centralwidget)
self.sirketler.setGeometry(QtCore.QRect(290, 290, 261, 301))
self.sirketler.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.sirketler.setObjectName("sirketler")
self.gosterSirket = QtWidgets.QPushButton(self.centralwidget)
self.gosterSirket.setGeometry(QtCore.QRect(880, 330, 271, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.gosterSirket.setFont(font)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("icons/show.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.gosterSirket.setIcon(icon4)
self.gosterSirket.setIconSize(QtCore.QSize(40, 40))
self.gosterSirket.setObjectName("gosterSirket")
self.secilenIndr = QtWidgets.QPushButton(self.centralwidget)
self.secilenIndr.setGeometry(QtCore.QRect(880, 490, 271, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.secilenIndr.setFont(font)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("icons/download.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.secilenIndr.setIcon(icon5)
self.secilenIndr.setIconSize(QtCore.QSize(40, 40))
self.secilenIndr.setObjectName("secilenIndr")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(60, 240, 191, 21))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.tumSirketler = QtWidgets.QListView(self.centralwidget)
self.tumSirketler.setGeometry(QtCore.QRect(20, 290, 256, 301))
self.tumSirketler.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.tumSirketler.setLineWidth(0)
self.tumSirketler.setResizeMode(QtWidgets.QListView.Fixed)
self.tumSirketler.setObjectName("tumSirketler")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(280, 240, 291, 16))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(290, 270, 261, 16))
self.label_5.setObjectName("label_5")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(1150, 330, 20, 211))
self.line.setLineWidth(3)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setGeometry(QtCore.QRect(20, 200, 1381, 16))
self.line_2.setLineWidth(2)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.bildirim = QtWidgets.QLineEdit(self.centralwidget)
self.bildirim.setGeometry(QtCore.QRect(10, 800, 701, 41))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.bildirim.setFont(font)
self.bildirim.setObjectName("bildirim")
self.genelGetir = QtWidgets.QPushButton(self.centralwidget)
self.genelGetir.setGeometry(QtCore.QRect(1190, 330, 261, 61))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.genelGetir.setFont(font)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("icons/geneldown.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.genelGetir.setIcon(icon6)
self.genelGetir.setIconSize(QtCore.QSize(35, 35))
self.genelGetir.setObjectName("genelGetir")
self.devamEt = QtWidgets.QPushButton(self.centralwidget)
self.devamEt.setGeometry(QtCore.QRect(1190, 480, 261, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.devamEt.setFont(font)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap("icons/continue.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.devamEt.setIcon(icon13)
self.devamEt.setIconSize(QtCore.QSize(35, 35))
self.devamEt.setObjectName("devamEt")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(610, 70, 271, 20))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(980, 230, 331, 20))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(1050, 570, 251, 20))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setGeometry(QtCore.QRect(880, 550, 531, 20))
self.line_3.setLineWidth(2)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.zipAktar = QtWidgets.QPushButton(self.centralwidget)
self.zipAktar.setGeometry(QtCore.QRect(610, 660, 241, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.zipAktar.setFont(font)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("icons/zip.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zipAktar.setIcon(icon7)
self.zipAktar.setIconSize(QtCore.QSize(40, 40))
self.zipAktar.setObjectName("zipAktar")
self.aktarExcel = QtWidgets.QPushButton(self.centralwidget)
self.aktarExcel.setGeometry(QtCore.QRect(1160, 710, 241, 61))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.aktarExcel.setFont(font)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap("icons/excel3.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.aktarExcel.setIcon(icon8)
self.aktarExcel.setIconSize(QtCore.QSize(50, 50))
self.aktarExcel.setObjectName("aktarExcel")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(1220, 640, 55, 16))
font = QtGui.QFont()
font.setPointSize(7)
font.setBold(True)
font.setWeight(75)
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(1190, 610, 191, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(1300, 660, 51, 22))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setGeometry(QtCore.QRect(1310, 640, 21, 16))
font = QtGui.QFont()
font.setPointSize(7)
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setGeometry(QtCore.QRect(1220, 660, 51, 22))
self.lineEdit_4.setObjectName("lineEdit_4")
self.bosZipler = QtWidgets.QListWidget(self.centralwidget)
self.bosZipler.setGeometry(QtCore.QRect(350, 670, 241, 91))
self.bosZipler.setObjectName("bosZipler")
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setGeometry(QtCore.QRect(630, 740, 181, 21))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_14.setFont(font)
self.label_14.setObjectName("label_14")
self.secHepsini = QtWidgets.QPushButton(self.centralwidget)
self.secHepsini.setGeometry(QtCore.QRect(420, 600, 131, 28))
font = QtGui.QFont()
font.setPointSize(10)
self.secHepsini.setFont(font)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap("icons/selectall.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.secHepsini.setIcon(icon9)
self.secHepsini.setObjectName("secHepsini")
self.yedekle = QtWidgets.QPushButton(self.centralwidget)
self.yedekle.setGeometry(QtCore.QRect(390, 120, 241, 61))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.yedekle.setFont(font)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap("icons/backup.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.yedekle.setIcon(icon10)
self.yedekle.setIconSize(QtCore.QSize(30, 30))
self.yedekle.setObjectName("yedekle")
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
self.listWidget.setGeometry(QtCore.QRect(880, 640, 256, 192))
self.listWidget.setObjectName("listWidget")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(1150, 790, 251, 20))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(970, 290, 231, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(1260, 290, 113, 22))
self.lineEdit.setObjectName("lineEdit")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(570, 260, 301, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.listWidget_2 = QtWidgets.QListWidget(self.centralwidget)
self.listWidget_2.setGeometry(QtCore.QRect(580, 290, 256, 301))
self.listWidget_2.setObjectName("listWidget_2")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(880, 410, 271, 61))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap("icons/checkbox.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton.setIcon(icon11)
self.pushButton.setIconSize(QtCore.QSize(40, 40))
self.pushButton.setObjectName("pushButton")
self.label_15 = QtWidgets.QLabel(self.centralwidget)
self.label_15.setGeometry(QtCore.QRect(10, 770, 171, 16))
font = QtGui.QFont()
font.setItalic(True)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setGeometry(QtCore.QRect(1470, 100, 20, 731))
self.line_4.setLineWidth(3)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.label_16 = QtWidgets.QLabel(self.centralwidget)
self.label_16.setGeometry(QtCore.QRect(940, 260, 261, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_16.setFont(font)
self.label_16.setObjectName("label_16")
self.donem = QtWidgets.QLineEdit(self.centralwidget)
self.donem.setGeometry(QtCore.QRect(1260, 260, 113, 22))
self.donem.setObjectName("donem")
self.tumGetir = QtWidgets.QPushButton(self.centralwidget)
self.tumGetir.setGeometry(QtCore.QRect(1170, 400, 291, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.tumGetir.setFont(font)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap("icons/all.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tumGetir.setIcon(icon12)
self.tumGetir.setIconSize(QtCore.QSize(35, 35))
self.tumGetir.setObjectName("tumGetir")
self.label.raise_()
self.sirketGetir.raise_()
self.yedekleSil.raise_()
self.anaExcel.raise_()
self.sirketler.raise_()
self.secilenIndr.raise_()
self.label_3.raise_()
self.tumSirketler.raise_()
self.label_4.raise_()
self.label_5.raise_()
self.line.raise_()
self.line_2.raise_()
self.bildirim.raise_()
self.gosterSirket.raise_()
self.genelGetir.raise_()
self.devamEt.raise_()
self.label_7.raise_()
self.label_8.raise_()
self.label_9.raise_()
self.line_3.raise_()
self.zipAktar.raise_()
self.aktarExcel.raise_()
self.label_12.raise_()
self.label_6.raise_()
self.lineEdit_3.raise_()
self.label_13.raise_()
self.lineEdit_4.raise_()
self.bosZipler.raise_()
self.label_14.raise_()
self.secHepsini.raise_()
self.yedekle.raise_()
self.listWidget.raise_()
self.label_2.raise_()
self.label_10.raise_()
self.lineEdit.raise_()
self.label_11.raise_()
self.listWidget_2.raise_()
self.pushButton.raise_()
self.label_15.raise_()
self.line_4.raise_()
self.label_16.raise_()
self.donem.raise_()
self.tumGetir.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1489, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label_5.setBuddy(self.sirketler)
self.retranslateUi(MainWindow)
self.sirketGetir.clicked.connect(MainWindow.sirketlerKap)
self.anaExcel.clicked.connect(MainWindow.bilancoExcel)
self.yedekleSil.clicked.connect(MainWindow.silYedekle)
self.gosterSirket.clicked.connect(self.sirketler.doItemsLayout)
self.genelGetir.clicked.connect(MainWindow.genelYukle)
self.anaExcel.released.connect(self.tumSirketler.doItemsLayout)
self.devamEt.clicked.connect(MainWindow.devamEttir)
self.zipAktar.clicked.connect(MainWindow.zipeAktar)
self.aktarExcel.clicked.connect(MainWindow.hepsiExcel)
self.zipAktar.released.connect(self.bosZipler.doItemsLayout)
self.secilenIndr.clicked.connect(MainWindow.cekSecilen)
self.secHepsini.clicked.connect(MainWindow.selectHepsi)
self.secHepsini.clicked.connect(self.sirketler.selectAll)
self.yedekle.clicked.connect(MainWindow.excelYedekle)
self.aktarExcel.clicked.connect(self.listWidget.doItemsLayout)
self.pushButton.clicked.connect(self.listWidget_2.doItemsLayout)
self.tumGetir.clicked.connect(MainWindow.donemselTum)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Bilanco Programı"))
self.label.setText(_translate("MainWindow", "Otomatik Bilanço Veri Çekme Programı V. 1.6"))
self.sirketGetir.setText(_translate("MainWindow", "Şirketleri Getir"))
self.yedekleSil.setText(_translate("MainWindow", "Sil ve Yedekle"))
self.anaExcel.setText(_translate("MainWindow", "Bilanco.xlsx Sisteme Al"))
self.gosterSirket.setText(_translate("MainWindow", "İndirilmemiş Verileri Göster"))
self.secilenIndr.setText(_translate("MainWindow", "Seçileni İndir"))
self.label_3.setText(_translate("MainWindow", "Tüm Şirketlerin Listesi"))
self.label_4.setText(_translate("MainWindow", "Sistemde Çekilmemiş Şirketler Listesi"))
self.label_5.setText(_translate("MainWindow", "(Burda tıkladıkların sisteme çekilecektir.)"))
self.bildirim.setText(_translate("MainWindow", "Bildirimler !"))
self.genelGetir.setText(_translate("MainWindow", "Tüm Şirketleri İndir"))
self.devamEt.setText(_translate("MainWindow", "Kaldığı Yerden Devam Ettir"))
self.label_7.setText(_translate("MainWindow", "Veriler İçin Ön Hazırlık"))
self.label_8.setText(_translate("MainWindow", "Verilerin İnternetten Çekildiği Yer"))
self.label_9.setText(_translate("MainWindow", "Verilerin Excel\'e Aktarılması"))
self.zipAktar.setText(_translate("MainWindow", "Zip Dosyalarını Aç"))
self.aktarExcel.setText(_translate("MainWindow", "Excel\'e Aktar"))
self.label_12.setText(_translate("MainWindow", "Dönem"))
self.label_6.setText(_translate("MainWindow", "Çekmek İstediğin Dönem"))
self.lineEdit_3.setText(_translate("MainWindow", "2019"))
self.label_13.setText(_translate("MainWindow", "Yıl"))
self.lineEdit_4.setText(_translate("MainWindow", "0"))
self.label_14.setText(_translate("MainWindow", "<-- Zip\'leri Boş Olanlar"))
self.secHepsini.setText(_translate("MainWindow", "Hepsini Seç"))
self.yedekle.setText(_translate("MainWindow", "Bilanco Yedekle"))
self.label_2.setText(_translate("MainWindow", " <-- Excel\'e Aktarılmamış Olanlar"))
self.label_10.setText(_translate("MainWindow", "İndirmek İstediğin Yılı Gir ->"))
self.lineEdit.setText(_translate("MainWindow", "2020"))
self.label_11.setText(_translate("MainWindow", "Seçilmiş (İndirilecek) Şirketler Listesi"))
self.pushButton.setText(_translate("MainWindow", "Seçilmişleri Göster"))
self.label_15.setText(_translate("MainWindow", "Writed by SVS © (2020)"))
self.label_16.setText(_translate("MainWindow", "İndirmek İstediğin Dönemi Gir ->"))
self.donem.setText(_translate("MainWindow", "5"))
self.tumGetir.setText(_translate("MainWindow", "Tüm Şirketleri Dönemsel İndir"))
class Bilanco(QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.sirketler.setSelectionMode(
QAbstractItemView.ExtendedSelection
)
self.ui.sirketler.setEditTriggers(QAbstractItemView.DoubleClicked|QAbstractItemView.EditKeyPressed)
self.ui.sirketler.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.sirketler.setViewMode(QListView.ListMode)
self.ui.listWidget.setSelectionMode(
QAbstractItemView.ExtendedSelection
)
self.ui.listWidget.setEditTriggers(QAbstractItemView.DoubleClicked|QAbstractItemView.EditKeyPressed)
self.ui.listWidget.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.listWidget.setViewMode(QListView.ListMode)
self.ui.anaExcel.released.connect(self.listeyeDok)
self.ui.sirketGetir.released.connect(self.bildirim1)
#self.ui.anaExcel.released.connect(self.bildirim2)
self.ui.yedekleSil.released.connect(self.bildirim3)
self.ui.gosterSirket.clicked.connect(self.widgetListele)
self.ui.gosterSirket.released.connect(self.bildirim4)
self.ui.pushButton.clicked.connect(self.widgetSelectedShow)
self.ui.pushButton.released.connect(self.bildirim8)
#self.ui.sirketGetir.released.connect(self.listeyeDok)
self.ui.zipAktar.released.connect(self.bildirim7)
self.ui.sirketler.itemClicked.connect(self.seciliSec)
def bildirim1(self):
self.ui.bildirim.setText("Sirket Verileri Cekildi!")
def bildirim2(self):
self.ui.bildirim.setText("Excel Datası Cekildi!")
def bildirim3(self):
self.ui.bildirim.setText("Eski Veriler silindi ve Bilanco yedeklendi!")
def bildirim4(self):
self.ui.bildirim.setText("Çekilen şirketler gösterildi!")
def bildirim5(self):
self.ui.bildirim.setText("Tum veriler CEKILEMEDI!")
def bildirim6(self):
self.ui.bildirim.setText("Tum veriler basariyla cekildi!")
def bildirim7(self):
self.ui.bildirim.setText("Dosyadaki tum Zip'ler açıldı!")
def bildirim8(self):
self.ui.bildirim.setText("Secilmis sirketler gösterildi!")
def selectHepsi(self):
print("ok")
def excelYedekle(self):
today = date.today()
shutil.copy('Bilanco-Excel/Bilanco.xlsm', 'BilancoYedek/BilancoBackUp-'+str(today)+'.xlsm')
self.ui.bildirim.setText("Bilanco excel'i yedeklendi!")
def donemselTum(self):
yil = int(self.ui.lineEdit.text())
donem = int(self.ui.donem.text())
yilDonem = str(yil) + "+" + str(donem)
options = webdriver.ChromeOptions()
adres = fileName + "\\Veriler\\-"
#options.add_argument("download.default_directory="+ adres ")
prefs = {
"download.default_directory": adres+yilDonem,
"download.prompt_for_download": False,
"download.directory_upgrade": True
}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get("https://www.kap.org.tr/tr/")
time.sleep(5)
ftablolar = browser.find_element_by_xpath("//*[@id='financialTablesTab']/div")
ftablolar.click()
time.sleep(5)
fyil = int(browser.find_element_by_xpath("//*[@id='email-form']/div[3]/div[2]/div[1]/div[1]/div").text)
time.sleep(2)
if(fyil != yil):
flager = fyil - yil
if flager > 0:
for i in range(flager):
cyil = browser.find_element_by_xpath('//*[@id="rightFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
else:
for i in range(abs(flager)):
cyil = browser.find_element_by_xpath('//*[@id="leftFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
fdonem = 5 - donem
print(fdonem)
if(donem == 3 or donem == 4):
while(fdonem > 0):
cdonem = browser.find_element_by_xpath('//*[@id="leftFinancialTablePeriodSliderButton"]')
cdonem.click()
time.sleep(2)
fdonem = fdonem - 1
else:
while(donem > 0):
cdonem = browser.find_element_by_xpath('//*[@id="rightFinancialTablePeriodSliderButton"]')
cdonem.click()
time.sleep(2)
donem = donem - 1
getir = browser.find_element_by_xpath("//*[@id='Getir']")
getir.click()
time.sleep(5)
try:
dosyaBulunamadi = browser.find_element_by_xpath("/html/body/div[10]/div/div/div[2]/div/div[2]")
if dosyaBulunamadi:
self.ui.bildirim.setText("Istedigin tarih ve doneme ait veriler bulunamadi!")
except:
self.ui.bildirim.setText("Istenilen tarih ve donemdeki tum sirketler cekildi!")
def sirketlerKap(self):
options = webdriver.ChromeOptions()
adres = fileName2 +"\\Sirketler"
#options.add_argument("download.default_directory="+ adres ")
prefs = {
"download.default_directory": adres,
"download.prompt_for_download": False,
"download.directory_upgrade": True
}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get("https://www.kap.org.tr/tr/api/exportCompanyPages/bist-sirketler/xls")
time.sleep(20)
browser.close()
df_sirket = pd.read_html('Sirketler/Sirketler.xls')
print(df_sirket)
sirketler = []
for i in range(len(df_sirket)):
temp = df_sirket[i][1][1:]
temp = temp.to_list()
for k in range(len(temp)):
s = temp[k]
sirketler.append(s)
model = QtGui.QStandardItemModel()
self.ui.tumSirketler.setModel(model)
for i in sirketler:
item = QtGui.QStandardItem(i)
model.appendRow(item)
def widgetSelectedShow(self):
self.ui.listWidget_2.clear()
# items1 = self.ui.sirketler.selectedItems()
# print(items1)
items1 = [item.text() for item in self.ui.sirketler.selectedItems()]
print(items1)
self.ui.listWidget_2.addItems(items1)
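# Downloads the selected companies one by one: for each ticker in listWidget_2
# a fresh Chrome session filters KAP by company and year, clicks "Getir" and
# saves the resulting zip under Veriler/-<ticker>; if no file is found it
# retries with the period slider shifted, otherwise an empty folder is created
# for the ticker.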
def cekSecilen(self):
lw = self.ui.listWidget_2
items = []
for x in range(lw.count()):
items.append(str(lw.item(x).text()))
print(items)
a = 0
for sirketisim in items:
passYap = False
print(a)
a = a + 1
options = webdriver.ChromeOptions()
adres = fileName + "\\Veriler\\-"
#options.add_argument("download.default_directory="+ adres ")
prefs = {
"download.default_directory": adres+sirketisim,
"download.prompt_for_download": False,
"download.directory_upgrade": True
}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get("https://www.kap.org.tr/tr/")
time.sleep(5)
ftablolar = browser.find_element_by_xpath("//*[@id='financialTablesTab']/div")
ftablolar.click()
time.sleep(5)
yilx = int(self.ui.lineEdit.text())
fyil = int(browser.find_element_by_xpath("//*[@id='email-form']/div[3]/div[2]/div[1]/div[1]/div").text)
print(fyil)
print(sirketisim)
time.sleep(2)
if fyil == yilx:
print(yilx)
else:
flager = fyil - yilx
if flager > 0:
for i in range(flager):
cyil = browser.find_element_by_xpath('//*[@id="rightFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
else:
for i in range(abs(flager)):
cyil = browser.find_element_by_xpath('//*[@id="leftFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
try:
sirket = browser.find_element_by_id("Sirket-6")
sirket.send_keys(sirketisim)
time.sleep(5)
ftablolar2 = browser.find_element_by_xpath("//*[@id='calendarFilterInputFinancialTable']/div/a")
ftablolar2.click()
time.sleep(5)
except:
try:
sirket = browser.find_element_by_id("Sirket-6")
sirket.clear()
sirket.send_keys(sirketisim[:-1])
time.sleep(1)
ftablolar2 = browser.find_element_by_xpath("//*[@id='calendarFilterInputFinancialTable']/div/a")
ftablolar2.click()
time.sleep(1)
except:
sirket = browser.find_element_by_id("Sirket-6")
sirket.clear()
sirket.send_keys(sirketisim)
time.sleep(1)
getir = browser.find_element_by_xpath("//*[@id='Getir']")
getir.click()
time.sleep(5)
try:
dosyaBulunamadi = browser.find_element_by_xpath("/html/body/div[10]/div/div/div[2]/div/div[2]")
if dosyaBulunamadi:
try:
solKaydir = browser.find_element_by_xpath('//*[@id="leftFinancialTablePeriodSliderButton"]/div')
solKaydir.click()
solKaydir = browser.find_element_by_xpath('//*[@id="leftFinancialTablePeriodSliderButton"]/div')
solKaydir.click()
time.sleep(2)
getir = browser.find_element_by_xpath("//*[@id='Getir']")
getir.click()
time.sleep(5)
except:
passYap = True
os.mkdir(adres+sirketisim)
print("Successfully created the directory %s " % (adres+sirketisim))
except:
pass
time.sleep(25)
browser.close()
if (path.exists(adres+sirketisim[:-1]+"\\2019-Tum Donemler.zip") == False) or (path.exists(adres+sirketisim+"\\2019-Tum Donemler.zip") == False):
if passYap == True:
self.ui.bildirim.setText("Tum veriler CEKILEMEDI!")
break
self.ui.bildirim.setText("Seçinler sirketler basariyla indirildi!")
def seciliSec(self):
print("ok")
def bilancoExcel(self):
sheets = pd.read_excel('Bilanco-Excel/Bilanco.xlsm' ,sheet_name=['KOZAA'])
bilanco_isim = sheets['KOZAA'].iloc[:,0]
bilanco_isim = bilanco_isim.values.tolist()
#bilanco_isim['bilanco'] = bilanco_isim['bilanco'].str.upper()
bilanco_isim_revize = []
for i in bilanco_isim:
if i[0] == ' ':
new_i = list(i)
for letter in i:
if letter == ' ':
new_i.pop(0)
else:
i = (''.join(new_i))
bilanco_isim_revize.append(i.upper())
break
else:
bilanco_isim_revize.append(i.upper())
print("Bitti !")
def zipeAktar(self):
self.ui.bosZipler.clear()
veriler = os.listdir(fileName + "/Veriler/")
bos_veri = []
for veri in veriler:
path_sirket = []
sirket = os.listdir(fileName2 +"\\Veriler\\"+veri)
path_sirket.append(sirket)
for zipex in veriler:
path = fileName + "\\Veriler\\"
path2 = zipex + "\\2019-Tum Donemler.zip"
pathe = path + path2
exact =fileName + "\\Excels"
try:
with ZipFile(pathe, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(exact)
print("ok")
except:
bos_veri.append(zipex)
print("fail")
self.ui.bosZipler.addItems(bos_veri)
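# Main export routine: for every extracted .xls report that matches the
# year/period typed into lineEdit_3 / lineEdit_4 and belongs to a ticker in
# Hisseler.xlsx, the balance sheet, income statement and cash-flow tables are
# parsed with pandas, mapped onto the fixed row layout of Bilanco.xlsm, scaled
# by the reporting-unit multiplier and inserted as a new column into the
# ticker's sheet via xlwings. Tickers whose sheet could not be found are
# collected in listWidget.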
def hepsiExcel(self):
sheets = pd.read_excel('Bilanco-Excel/Bilanco.xlsm' ,sheet_name=['KOZAA'])
bilanco_isim = sheets['KOZAA'].iloc[:,0]
bilanco_isim = bilanco_isim.values.tolist()
#bilanco_isim['bilanco'] = bilanco_isim['bilanco'].str.upper()
excel_sheets = xlrd.open_workbook('Bilanco-Excel/Bilanco.xlsm', on_demand=True)
excel_list = excel_sheets.sheet_names()
excel_list.remove('Anatablo')
excel_list.remove('HISSE-GRAFIK')
excel_list.remove('GRAFİK 2')
excel_list.remove('ÖZEL ORANLAR')
excel_list.remove('Güncel Fiyat')
excel_liste = [x.upper() for x in excel_list]
print (excel_liste)
cekSirkets = pd.read_excel('Hisseler/Hisseler.xlsx')
cekSirketler = cekSirkets[["KOD"]].values.tolist()
print(cekSirketler)
bilanco_isim_revize = []
for i in bilanco_isim:
if i[0] == ' ':
new_i = list(i)
for letter in i:
if letter == ' ':
new_i.pop(0)
else:
i = (''.join(new_i))
bilanco_isim_revize.append(i.upper())
break
else:
bilanco_isim_revize.append(i.upper())
print(bilanco_isim_revize)
excels = os.listdir("Excels/")
matching = [s[:-4] for s in excels if '.xls' in s]
print(len(matching))
total = 0
for excel in matching:
temp = excel.split("-")
keep = len(temp)
total = total + keep
print(total)
npgive = np.empty([total,3], dtype = object)
z = 0
for i in range(len(matching)):
temp = matching[i]
x = temp.split("_")
y = temp.split("-")
for k in range(len(y)):
if k == (len(y) - 1):
temp = y[-1].split("_")
npgive[z][0] = temp[0]
npgive[z][1] = x[-1]
npgive[z][2] = x[-2]
z += 1
else:
npgive[z][0] = y[k]
npgive[z][1] = x[-1]
npgive[z][2] = x[-2]
z += 1
sirketKod = pd.DataFrame({'Kod': npgive[:, 0], 'Donem': npgive[:, 1],'Yil': npgive[:, 2]})
print(sirketKod)
yil = self.ui.lineEdit_3.text()
donem = self.ui.lineEdit_4.text()
print(yil)
print(donem)
yil = int(yil)
donem = int(donem)
donemlik = donem * 3
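# NOTE: Donem/Yil in sirketKod are strings (they come from filename splits),
# so comparing them with the ints above yields an empty frame; is_sirketKod is
# only printed and not used further below.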
is_sirketKod = sirketKod[(sirketKod.Yil == yil) & (sirketKod.Donem == donem)]
print(is_sirketKod)
olmadi = []
a = 0
b = 0
for take in excel_liste:
c = sirketKod[sirketKod.Kod == take.upper()]
if(c.empty):
print("fail")
olmadi.append(take.upper())
else:
print("ok")
b += 1
print(olmadi)
donemstr = str(donem)
yilstr = str(yil)
sonExcel = []
for exc in matching:
x = exc.split("_")
if donemstr in x[-1] and yilstr in x[-2]:
sonExcel.append(exc)
else:
continue
# print(sonExcel)
cekExcel = []
for sExc in sonExcel:
for excLi in cekSirketler:
if excLi[0] in sExc:
cekExcel.append(sExc)
cekexcel = []
[cekexcel.append(x) for x in cekExcel if x not in cekexcel]
olmadis = []
print(cekexcel)
for excs in cekexcel:
x = excs.split("-")
if len(x) < 2:
y = excs.split("_")
print(excs)
print(y[0])
excs = str(excs) + ".xls"
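# npsave holds (row label, value) pairs in the exact row order of the
# Bilanco.xlsm sheets; row 0 stores the "year/period" header, the rest is
# filled in from the parsed tables below.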
npsave = np.empty([len(bilanco_isim_revize),2], dtype = object)
for i in range(len(bilanco_isim_revize)):
npsave[i][0] = bilanco_isim_revize[i]
# extract the balance-sheet data from the selected tables
manu = pd.read_html("Excels/"+ excs)
npsave[0][1] = str(yil) + "/" + str(donemlik)
bilanchos = []
for i in range(len(manu)):
if len(manu[i].columns) >= 5 and len(manu[i].columns) <= 8:
if len(manu[i])>2:
bilanchos.append(i)
newdf = manu[bilanchos[0]]
del bilanchos[0]
newdf3 = manu[bilanchos[-1]]
del bilanchos[-1]
if len(manu[bilanchos[0]]) == 300:
newdf2 = manu[bilanchos[0]]
else:
frames = []
for i in range(len(bilanchos)):
frames.append(manu[bilanchos[i]])
if len(frames) == 0:
newdf2 = manu[bilanchos[0]]
elif len(frames) >= 1 :
newdf2 = pd.concat(frames, ignore_index=True)
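# manu[0] is the report header; its second cell carries the reporting unit
# (e.g. "1.000 TL"), which is turned into the integer multiplier carpanz
# applied to every exported figure.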
carpanx = manu[0]
carpany = carpanx[1][0]
carpanz = carpany.strip(' TL')
if not carpanz:
carpanz = 1
else:
oldstr = carpanz
if isinstance(oldstr, int):
carpanz = oldstr
else:
newstr = oldstr.replace(".", "")
carpanz = int(newstr)
print(carpanz)
print(len(newdf))
print(len(newdf2))
print(len(newdf3))
for a in bilanchos:
print(len(manu[a]))
# processing df1 (balance sheet: assets, liabilities, equity)
df1 = newdf[[1,3]].dropna(subset = [1])
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.fillna(0)
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.rename(columns={1: "bilanco", 3: "ciro"})
df1['bilanco'] = df1['bilanco'].str.upper()
df1 = df1.replace({'İ':'I'},regex = True)
donen_varliklar = df1.loc[2:54]
ara_toplam_donenvarliklar = df1.loc[51].ciro
toplam_donen_varlıklar = df1.loc[54].ciro
duran_varliklar = df1.loc[55:127]
ozkaynak_yontemiyle_degerlenen_yatirimlar = df1.loc[68].ciro
toplam_duran_varliklar = df1.loc[127].ciro
toplam_varliklar = df1.loc[128].ciro
kisa_vadeli_yukumlulukler = df1.loc[131:190]
finansal_borclar = df1.loc[131].ciro
diger_finansal_yukumlulukler = df1.loc[184].ciro
musteri_soz_dogan_yuk = df1.loc[167].ciro
ertelenmis_gelirler = df1.loc[176].ciro
borc_karsiliklari = df1.loc[180].ciro
ara_toplam_kisavadeliy = df1.loc[187].ciro
toplam_kisa_vadeli = df1.loc[190].ciro
uzun_vadeli_yukumlulukler = df1.loc[192:240]
u_finansal_borclar = df1.loc[192].ciro
u_musteri_soz_dogan_yuk = df1.loc[217].ciro
u_ertelenmis_gelirler = df1.loc[226].ciro
calisanlara_saglanan_faydalara = df1.loc[230].ciro
toplam_uzun_vadeli = df1.loc[240].ciro
ozkaynaklar = df1.loc[243:294]
geçmis_yillar_kar_zararlari = df1.loc[291].ciro
net_donem_kar_zaralari = df1.loc[292].ciro
hisse_senedi_ihrac_primleri = df1.loc[251].ciro
azinlik_paylari = df1.loc[293].ciro
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
toplam_ozkaynaklar = df1.loc[294].ciro
toplam_kaynaklar = df1.loc[295].ciro
for find in range(1,13):
cost = donen_varliklar[donen_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
npsave[11][1] = int(ara_toplam_donenvarliklar.replace(".", ""))
npsave[1][1] = int(toplam_donen_varlıklar.replace(".", ""))
for find in range(13,30):
cost = duran_varliklar[duran_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatirimlar
if oldstr == 0:
npsave[19][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[19][1] = int(newstr)
npsave[13][1] = int(toplam_duran_varliklar.replace(".", ""))
npsave[29][1] = int(toplam_varliklar.replace(".", ""))
for find in range(30,45):
cost = kisa_vadeli_yukumlulukler[kisa_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = finansal_borclar
if oldstr == 0:
npsave[32][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[32][1] = int(newstr)
oldstr = diger_finansal_yukumlulukler
if oldstr == 0:
npsave[33][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[33][1] = int(newstr)
oldstr = musteri_soz_dogan_yuk
if oldstr == 0:
npsave[36][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[36][1] = int(newstr)
oldstr = ertelenmis_gelirler
if oldstr == 0:
npsave[39][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[39][1] = int(newstr)
oldstr = borc_karsiliklari
if oldstr == 0:
npsave[41][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[41][1] = int(newstr)
npsave[43][1] = int(ara_toplam_kisavadeliy.replace(".", ""))
npsave[31][1] = int(toplam_kisa_vadeli.replace(".", ""))
for find in range(45,58):
cost = uzun_vadeli_yukumlulukler[uzun_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = u_finansal_borclar
if oldstr == 0:
npsave[46][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[46][1] = int(newstr)
oldstr = u_musteri_soz_dogan_yuk
if oldstr == 0:
npsave[50][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[50][1] = int(newstr)
oldstr = u_ertelenmis_gelirler
if oldstr == 0:
npsave[53][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[53][1] = int(newstr)
oldstr = u_ertelenmis_gelirler
if oldstr == 0:
npsave[53][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[53][1] = int(newstr)
oldstr = calisanlara_saglanan_faydalara
if oldstr == 0:
npsave[55][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[55][1] = int(newstr)
npsave[45][1] = int(toplam_uzun_vadeli.replace(".", ""))
for find in range(58,71):
cost = ozkaynaklar[ozkaynaklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = geçmis_yillar_kar_zararlari
if oldstr == 0:
npsave[66][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[66][1] = int(newstr)
oldstr = net_donem_kar_zaralari
if oldstr == 0:
npsave[67][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[67][1] = int(newstr)
oldstr = hisse_senedi_ihrac_primleri
if oldstr == 0:
npsave[62][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[62][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[69][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[69][1] = int(newstr)
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
npsave[68][1] = diger_ozsermaye_kalemleri
npsave[58][1] = int(toplam_ozkaynaklar.replace(".", ""))
npsave[70][1] = int(toplam_kaynaklar.replace(".", ""))
# processing df2 (income statement)
df2 = newdf2[[1,3]].dropna(subset = [1])
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.fillna(0)
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.rename(columns={1: "bilanco", 3: "ciro"})
df2['bilanco'] = df2['bilanco'].str.upper()
df2 = df2.replace({'İ':'I'},regex = True)
surdurulen_faaliyetler= df2.loc[0:148]
satis_gelirleri = df2.loc[2].ciro
satislerin_maliyetleri = df2.loc[3].ciro
f_u_p_k_diğer_ge = df2.loc[6].ciro
f_u_p_k_diğer_gi = df2.loc[17].ciro
f_sektoru_faaliyetlerinden_diger_kar = df2.loc[15].ciro
satis_diger_gelir_ve_giderler = df2.loc[27].ciro
pazarlama_satis_ve_dagıtım_gider = df2.loc[32].ciro
genel_yonetim_giderleri = df2.loc[31].ciro
arastirma_ve_gelistirme_giderleri = df2.loc[33].ciro
diger_faaliyet_gelirleri = df2.loc[34].ciro
diger_faaliyet_giderleri = df2.loc[35].ciro
faaliyet_kari_oncesi_diger_gelir_ve_giderl = df2.loc[36].ciro
faaliyet_kari_zarari = df2.loc[37].ciro
oldstr = faaliyet_kari_zarari
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a -( b + c)
yatirim_faaliyetlerinden_giderler = df2.loc[41].ciro
faaliyet_diger_gelir_ve_giderler = df2.loc[44].ciro
ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar = df2.loc[43].ciro
finansman_gideri_oncesi_faaliyet_kari_zarari = df2.loc[48].ciro
finansal_gelirler = df2.loc[49].ciro
finansal_giderler = df2.loc[50].ciro
surdurulen_faaliyetler_vergi_geliri = df2.loc[53].ciro
donem_vergi_geliri = df2.loc[54].ciro
ertelenmis_vergi_geliri = df2.loc[55].ciro
surdurulen_faaliyetler_donem_kari_zarari = df2.loc[56].ciro
durdurulan_faaliyetler_donem_kari_zarari = df2.loc[57].ciro
durdurulan_faaliyetler_vergi_sonrasi_donem = df2.loc[57].ciro
azinlik_paylari = df2.loc[60].ciro
for find in range(71,122):
cost = surdurulen_faaliyetler[surdurulen_faaliyetler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = satis_gelirleri
if oldstr == 0:
npsave[72][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[72][1] = int(newstr)
oldstr = satislerin_maliyetleri
if oldstr == 0:
npsave[73][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[73][1] = int(newstr)
oldstr = f_u_p_k_diğer_ge
if oldstr == 0:
npsave[76][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[76][1] = int(newstr)
oldstr = f_u_p_k_diğer_gi
if oldstr == 0:
npsave[77][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[77][1] = int(newstr)
oldstr = f_sektoru_faaliyetlerinden_diger_kar
if oldstr == 0:
npsave[78][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[78][1] = int(newstr)
oldstr = satis_diger_gelir_ve_giderler
if oldstr == 0:
npsave[80][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[80][1] = int(newstr)
oldstr = pazarlama_satis_ve_dagıtım_gider
if oldstr == 0:
npsave[82][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[82][1] = int(newstr)
oldstr = genel_yonetim_giderleri
if oldstr == 0:
npsave[83][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[83][1] = int(newstr)
oldstr = arastirma_ve_gelistirme_giderleri
if oldstr == 0:
npsave[84][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[84][1] = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
npsave[85][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[85][1] = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
npsave[86][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[86][1] = int(newstr)
oldstr = faaliyet_kari_oncesi_diger_gelir_ve_giderl
if oldstr == 0:
npsave[87][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[87][1] = int(newstr)
oldstr = faaliyet_kari_zarari
if oldstr == 0:
npsave[88][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[88][1] = int(newstr)
oldstr = df2.loc[37].ciro
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a -( b + c)
npsave[89][1] = net_faaliyet_kar_zarari
oldstr = yatirim_faaliyetlerinden_giderler
if oldstr == 0:
npsave[91][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[91][1] = int(newstr)
oldstr = faaliyet_diger_gelir_ve_giderler
if oldstr == 0:
npsave[92][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[92][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar
if oldstr == 0:
npsave[93][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[93][1] = int(newstr)
oldstr = finansman_gideri_oncesi_faaliyet_kari_zarari
if oldstr == 0:
npsave[94][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[94][1] = int(newstr)
oldstr = finansal_gelirler
if oldstr == 0:
npsave[95][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[95][1] = int(newstr)
oldstr = finansal_giderler
if oldstr == 0:
npsave[96][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[96][1] = int(newstr)
oldstr = surdurulen_faaliyetler_vergi_geliri
if oldstr == 0:
npsave[99][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[99][1] = int(newstr)
oldstr = donem_vergi_geliri
if oldstr == 0:
npsave[100][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[100][1] = int(newstr)
oldstr = ertelenmis_vergi_geliri
if oldstr == 0:
npsave[101][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[101][1] = int(newstr)
oldstr = surdurulen_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[103][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[103][1] = int(newstr)
oldstr = durdurulan_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[106][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[106][1] = int(newstr)
oldstr = durdurulan_faaliyetler_vergi_sonrasi_donem
if oldstr == 0:
npsave[105][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[105][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[108][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[108][1] = int(newstr)
# processing df3 (cash-flow statement)
df3 = newdf3[[1,3]].dropna(subset = [1])
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.fillna(0)
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.rename(columns={1: "bilanco", 3: "ciro"})
df3['bilanco'] = df3['bilanco'].astype(str).str.upper()
df3 = df3.replace({'İ':'I'},regex = True)
nakit_akislari = df3.loc[0:202]
amortisman_giderleri = df3.loc[6].ciro
npsave2 = np.empty([12,2],dtype = object)
npsave2[0][0] = "IŞLETME FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[1][0] = "DÖNEM KARI (ZARARI)"
npsave2[2][0] = "AMORTISMAN VE ITFA GIDERI ILE ILGILI DÜZELTMELER"
npsave2[3][0] = "IŞLETME SERMAYESINDE GERÇEKLEŞEN DEĞIŞIMLER"
npsave2[4][0] = "FINANSAL YATIRIMLARDAKI AZALIŞ (ARTIŞ)"
npsave2[5][0] = "FAALIYETLERDEN ELDE EDILEN NAKIT AKIŞLARI"
npsave2[6][0] = "YATIRIM FAALIYETLERINDEN KAYNAKLANAN NAKIT AKIŞLARI"
npsave2[7][0] = "MADDI VE MADDI OLMAYAN DURAN VARLIKLARIN ALIMDAN KAYNAKLANAN NAKIT ÇIKIŞLARI"
npsave2[8][0] = "FINANSMAN FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[9][0] = "NAKIT VE NAKIT BENZERLERINDEKI NET ARTIŞ (AZALIŞ)"
npsave2[10][0] = "DÖNEM BAŞI NAKIT VE NAKIT BENZERLERI"
npsave2[11][0] = "DÖNEM SONU NAKIT VE NAKIT BENZERLERI"
for find in range(len(npsave2)):
cost = nakit_akislari[nakit_akislari["bilanco"] == npsave2[find][0]].ciro
if cost.empty:
npsave2[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave2[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave2[find][1] = int(newstr)
sistem_1 = pd.DataFrame({'BİLANÇO': npsave[:, 0], 'CİRO': npsave[:, 1]})
sistem_2 = pd.DataFrame({'BİLANÇO': npsave2[:, 0], 'CİRO': npsave2[:, 1]})
excel_aktar = sistem_1.append(sistem_2, ignore_index = True)
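# "CIRO" (ASCII I) is a new, scaled copy of the "CİRO" (dotted İ) column; only
# rows 1+ of the scaled column are written out, while the period label in row 0
# is read from the original column below.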
excel_aktar["CIRO"] = excel_aktar["CİRO"] * carpanz
app = xw.App(visible=False) # IF YOU WANT EXCEL TO RUN IN BACKGROUND
xlwb = xw.Book('Bilanco-Excel/Bilanco.xlsm')
try:
xlws = xlwb.sheets[y[0].upper()]
except:
try:
xlws = xlwb.sheets[y[0].lower()]
except:
xlwb.close()
app.kill()
olmadis.append(y[0])
continue
xlws.range("B:B").insert('right')
donem = list(excel_aktar.CİRO)
xlws.range('B2').value = donem[0]
ciro = list(excel_aktar.CIRO)
xlws.range('B3').options(transpose=True).value = ciro[1:]
xlwb.save()
xlwb.close()
app.kill()
else:
y = excs.split("_")
z = y[0].split("-")
print(excs)
excs = str(excs) + ".xls"
npsave = np.empty([len(bilanco_isim_revize),2], dtype = object)
for i in range(len(bilanco_isim_revize)):
npsave[i][0] = bilanco_isim_revize[i]
# extract the balance-sheet data from the selected tables
manu = pd.read_html("Excels/"+ excs)
npsave[0][1] = str(yil) + "/" + str(donemlik)
bilanchos = []
for i in range(len(manu)):
if len(manu[i].columns) >= 5 and len(manu[i].columns) <= 8:
if len(manu[i])>2:
bilanchos.append(i)
newdf = manu[bilanchos[0]]
del bilanchos[0]
newdf3 = manu[bilanchos[-1]]
del bilanchos[-1]
if len(manu[bilanchos[0]]) == 300:
newdf2 = manu[bilanchos[0]]
else:
frames = []
for i in range(len(bilanchos)):
frames.append(manu[bilanchos[i]])
if len(frames) == 1:
newdf2 = manu[bilanchos[0]]
elif len(frames) >= 1 :
newdf2 = pd.concat(frames, ignore_index=True)
carpanx = manu[0]
carpany = carpanx[1][0]
carpanz = carpany.strip(' TL')
if not carpanz:
carpanz = 1
else:
oldstr = carpanz
if isinstance(oldstr, int):
carpanz = oldstr
else:
newstr = oldstr.replace(".", "")
carpanz = int(newstr)
print(carpanz)
print(len(newdf))
print(len(newdf2))
print(len(newdf3))
for a in bilanchos:
print(len(manu[a]))
# processing df1 (balance sheet: assets, liabilities, equity)
df1 = newdf[[1,3]].dropna(subset = [1])
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.fillna(0)
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.rename(columns={1: "bilanco", 3: "ciro"})
df1['bilanco'] = df1['bilanco'].str.upper()
df1 = df1.replace({'İ':'I'},regex = True)
donen_varliklar = df1.loc[2:54]
ara_toplam_donenvarliklar = df1.loc[51].ciro
toplam_donen_varlıklar = df1.loc[54].ciro
duran_varliklar = df1.loc[55:127]
ozkaynak_yontemiyle_degerlenen_yatirimlar = df1.loc[68].ciro
toplam_duran_varliklar = df1.loc[127].ciro
toplam_varliklar = df1.loc[128].ciro
kisa_vadeli_yukumlulukler = df1.loc[131:190]
finansal_borclar = df1.loc[131].ciro
diger_finansal_yukumlulukler = df1.loc[184].ciro
musteri_soz_dogan_yuk = df1.loc[167].ciro
ertelenmis_gelirler = df1.loc[176].ciro
borc_karsiliklari = df1.loc[180].ciro
ara_toplam_kisavadeliy = df1.loc[187].ciro
toplam_kisa_vadeli = df1.loc[190].ciro
uzun_vadeli_yukumlulukler = df1.loc[192:240]
u_finansal_borclar = df1.loc[192].ciro
u_musteri_soz_dogan_yuk = df1.loc[217].ciro
u_ertelenmis_gelirler = df1.loc[226].ciro
calisanlara_saglanan_faydalara = df1.loc[230].ciro
toplam_uzun_vadeli = df1.loc[240].ciro
ozkaynaklar = df1.loc[243:294]
geçmis_yillar_kar_zararlari = df1.loc[291].ciro
net_donem_kar_zaralari = df1.loc[292].ciro
hisse_senedi_ihrac_primleri = df1.loc[251].ciro
azinlik_paylari = df1.loc[293].ciro
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
toplam_ozkaynaklar = df1.loc[294].ciro
toplam_kaynaklar = df1.loc[295].ciro
for find in range(1,13):
cost = donen_varliklar[donen_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
npsave[11][1] = int(ara_toplam_donenvarliklar.replace(".", ""))
npsave[1][1] = int(toplam_donen_varlıklar.replace(".", ""))
for find in range(13,30):
cost = duran_varliklar[duran_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatirimlar
if oldstr == 0:
npsave[19][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[19][1] = int(newstr)
npsave[13][1] = int(toplam_duran_varliklar.replace(".", ""))
npsave[29][1] = int(toplam_varliklar.replace(".", ""))
for find in range(30,45):
cost = kisa_vadeli_yukumlulukler[kisa_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = finansal_borclar
if oldstr == 0:
npsave[32][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[32][1] = int(newstr)
oldstr = diger_finansal_yukumlulukler
if oldstr == 0:
npsave[33][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[33][1] = int(newstr)
oldstr = musteri_soz_dogan_yuk
if oldstr == 0:
npsave[36][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[36][1] = int(newstr)
oldstr = ertelenmis_gelirler
if oldstr == 0:
npsave[39][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[39][1] = int(newstr)
oldstr = borc_karsiliklari
if oldstr == 0:
npsave[41][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[41][1] = int(newstr)
npsave[43][1] = int(ara_toplam_kisavadeliy.replace(".", ""))
npsave[31][1] = int(toplam_kisa_vadeli.replace(".", ""))
for find in range(45,58):
cost = uzun_vadeli_yukumlulukler[uzun_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = u_finansal_borclar
if oldstr == 0:
npsave[46][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[46][1] = int(newstr)
oldstr = u_musteri_soz_dogan_yuk
if oldstr == 0:
npsave[50][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[50][1] = int(newstr)
oldstr = u_ertelenmis_gelirler
if oldstr == 0:
npsave[53][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[53][1] = int(newstr)
oldstr = u_ertelenmis_gelirler
if oldstr == 0:
npsave[53][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[53][1] = int(newstr)
oldstr = calisanlara_saglanan_faydalara
if oldstr == 0:
npsave[55][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[55][1] = int(newstr)
npsave[45][1] = int(toplam_uzun_vadeli.replace(".", ""))
for find in range(58,71):
cost = ozkaynaklar[ozkaynaklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = geçmis_yillar_kar_zararlari
if oldstr == 0:
npsave[66][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[66][1] = int(newstr)
oldstr = net_donem_kar_zaralari
if oldstr == 0:
npsave[67][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[67][1] = int(newstr)
oldstr = hisse_senedi_ihrac_primleri
if oldstr == 0:
npsave[62][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[62][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[69][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[69][1] = int(newstr)
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
npsave[68][1] = diger_ozsermaye_kalemleri
npsave[58][1] = int(toplam_ozkaynaklar.replace(".", ""))
npsave[70][1] = int(toplam_kaynaklar.replace(".", ""))
# processing df2 (income statement)
df2 = newdf2[[1,3]].dropna(subset = [1])
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.fillna(0)
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.rename(columns={1: "bilanco", 3: "ciro"})
df2['bilanco'] = df2['bilanco'].str.upper()
df2 = df2.replace({'İ':'I'},regex = True)
surdurulen_faaliyetler= df2.loc[0:148]
satis_gelirleri = df2.loc[2].ciro
satislerin_maliyetleri = df2.loc[3].ciro
f_u_p_k_diğer_ge = df2.loc[6].ciro
f_u_p_k_diğer_gi = df2.loc[17].ciro
f_sektoru_faaliyetlerinden_diger_kar = df2.loc[15].ciro
satis_diger_gelir_ve_giderler = df2.loc[27].ciro
pazarlama_satis_ve_dagıtım_gider = df2.loc[32].ciro
genel_yonetim_giderleri = df2.loc[31].ciro
arastirma_ve_gelistirme_giderleri = df2.loc[33].ciro
diger_faaliyet_gelirleri = df2.loc[34].ciro
diger_faaliyet_giderleri = df2.loc[35].ciro
faaliyet_kari_oncesi_diger_gelir_ve_giderl = df2.loc[36].ciro
faaliyet_kari_zarari = df2.loc[37].ciro
oldstr = faaliyet_kari_zarari
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a -( b + c)
yatirim_faaliyetlerinden_giderler = df2.loc[41].ciro
faaliyet_diger_gelir_ve_giderler = df2.loc[44].ciro
ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar = df2.loc[43].ciro
finansman_gideri_oncesi_faaliyet_kari_zarari = df2.loc[48].ciro
finansal_gelirler = df2.loc[49].ciro
finansal_giderler = df2.loc[50].ciro
surdurulen_faaliyetler_vergi_geliri = df2.loc[53].ciro
donem_vergi_geliri = df2.loc[54].ciro
ertelenmis_vergi_geliri = df2.loc[55].ciro
surdurulen_faaliyetler_donem_kari_zarari = df2.loc[56].ciro
durdurulan_faaliyetler_donem_kari_zarari = df2.loc[57].ciro
durdurulan_faaliyetler_vergi_sonrasi_donem = df2.loc[57].ciro
azinlik_paylari = df2.loc[60].ciro
for find in range(71,122):
cost = surdurulen_faaliyetler[surdurulen_faaliyetler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = satis_gelirleri
if oldstr == 0:
npsave[72][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[72][1] = int(newstr)
oldstr = satislerin_maliyetleri
if oldstr == 0:
npsave[73][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[73][1] = int(newstr)
oldstr = f_u_p_k_diğer_ge
if oldstr == 0:
npsave[76][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[76][1] = int(newstr)
oldstr = f_u_p_k_diğer_gi
if oldstr == 0:
npsave[77][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[77][1] = int(newstr)
oldstr = f_sektoru_faaliyetlerinden_diger_kar
if oldstr == 0:
npsave[78][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[78][1] = int(newstr)
oldstr = satis_diger_gelir_ve_giderler
if oldstr == 0:
npsave[80][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[80][1] = int(newstr)
oldstr = pazarlama_satis_ve_dagıtım_gider
if oldstr == 0:
npsave[82][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[82][1] = int(newstr)
oldstr = genel_yonetim_giderleri
if oldstr == 0:
npsave[83][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[83][1] = int(newstr)
oldstr = arastirma_ve_gelistirme_giderleri
if oldstr == 0:
npsave[84][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[84][1] = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
npsave[85][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[85][1] = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
npsave[86][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[86][1] = int(newstr)
oldstr = faaliyet_kari_oncesi_diger_gelir_ve_giderl
if oldstr == 0:
npsave[87][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[87][1] = int(newstr)
oldstr = faaliyet_kari_zarari
if oldstr == 0:
npsave[88][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[88][1] = int(newstr)
oldstr = df2.loc[37].ciro
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a -( b + c)
npsave[89][1] = net_faaliyet_kar_zarari
oldstr = yatirim_faaliyetlerinden_giderler
if oldstr == 0:
npsave[91][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[91][1] = int(newstr)
oldstr = faaliyet_diger_gelir_ve_giderler
if oldstr == 0:
npsave[92][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[92][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar
if oldstr == 0:
npsave[93][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[93][1] = int(newstr)
oldstr = finansman_gideri_oncesi_faaliyet_kari_zarari
if oldstr == 0:
npsave[94][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[94][1] = int(newstr)
oldstr = finansal_gelirler
if oldstr == 0:
npsave[95][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[95][1] = int(newstr)
oldstr = finansal_giderler
if oldstr == 0:
npsave[96][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[96][1] = int(newstr)
oldstr = surdurulen_faaliyetler_vergi_geliri
if oldstr == 0:
npsave[99][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[99][1] = int(newstr)
oldstr = donem_vergi_geliri
if oldstr == 0:
npsave[100][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[100][1] = int(newstr)
oldstr = ertelenmis_vergi_geliri
if oldstr == 0:
npsave[101][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[101][1] = int(newstr)
oldstr = surdurulen_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[103][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[103][1] = int(newstr)
oldstr = durdurulan_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[106][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[106][1] = int(newstr)
oldstr = durdurulan_faaliyetler_vergi_sonrasi_donem
if oldstr == 0:
npsave[105][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[105][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[108][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[108][1] = int(newstr)
# processing df3 (cash-flow statement)
df3 = newdf3[[1,3]].dropna(subset = [1])
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.fillna(0)
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.rename(columns={1: "bilanco", 3: "ciro"})
df3['bilanco'] = df3['bilanco'].astype(str).str.upper()
df3 = df3.replace({'İ':'I'},regex = True)
nakit_akislari = df3.loc[0:202]
amortisman_giderleri = df3.loc[6].ciro
npsave2 = np.empty([12,2],dtype = object)
npsave2[0][0] = "IŞLETME FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[1][0] = "DÖNEM KARI (ZARARI)"
npsave2[2][0] = "AMORTISMAN VE ITFA GIDERI ILE ILGILI DÜZELTMELER"
npsave2[3][0] = "IŞLETME SERMAYESINDE GERÇEKLEŞEN DEĞIŞIMLER"
npsave2[4][0] = "FINANSAL YATIRIMLARDAKI AZALIŞ (ARTIŞ)"
npsave2[5][0] = "FAALIYETLERDEN ELDE EDILEN NAKIT AKIŞLARI"
npsave2[6][0] = "YATIRIM FAALIYETLERINDEN KAYNAKLANAN NAKIT AKIŞLARI"
npsave2[7][0] = "MADDI VE MADDI OLMAYAN DURAN VARLIKLARIN ALIMDAN KAYNAKLANAN NAKIT ÇIKIŞLARI"
npsave2[8][0] = "FINANSMAN FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[9][0] = "NAKIT VE NAKIT BENZERLERINDEKI NET ARTIŞ (AZALIŞ)"
npsave2[10][0] = "DÖNEM BAŞI NAKIT VE NAKIT BENZERLERI"
npsave2[11][0] = "DÖNEM SONU NAKIT VE NAKIT BENZERLERI"
for find in range(len(npsave2)):
cost = nakit_akislari[nakit_akislari["bilanco"] == npsave2[find][0]].ciro
if cost.empty:
npsave2[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave2[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave2[find][1] = int(newstr)
sistem_1 = pd.DataFrame({'BİLANÇO': npsave[:, 0], 'CİRO': npsave[:, 1]})
sistem_2 = pd.DataFrame({'BİLANÇO': npsave2[:, 0], 'CİRO': npsave2[:, 1]})
excel_aktar = sistem_1.append(sistem_2, ignore_index = True)
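# "CIRO" (ASCII I) is a new, scaled copy of the "CİRO" (dotted İ) column; only
# rows 1+ of the scaled column are written out, while the period label in row 0
# is read from the original column below.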
excel_aktar["CIRO"] = excel_aktar["CİRO"] * carpanz
for items in z:
print(items)
app = xw.App(visible=False) # IF YOU WANT EXCEL TO RUN IN BACKGROUND
xlwb = xw.Book('Bilanco-Excel/Bilanco.xlsm')
try:
xlws = xlwb.sheets[items.upper()]
except:
try:
xlws = xlwb.sheets[items.lower()]
except:
xlwb.close()
app.kill()
olmadis.append(items)
continue
xlws.range("B:B").insert('right')
donem = list(excel_aktar.CİRO)
xlws.range('B2').value = donem[0]
ciro = list(excel_aktar.CIRO)
xlws.range('B3').options(transpose=True).value = ciro[1:]
xlwb.save()
xlwb.close()
app.kill()
self.ui.listWidget.addItems(olmadis)
self.ui.bildirim.setText("Veriler excel'e aktarildi!")
def listeyeDok(self):
df_sirket = pd.read_html('Sirketler/Sirketler.xls')
print(df_sirket)
sirketler = []
for i in range(len(df_sirket)):
temp = df_sirket[i][1][1:]
temp = temp.to_list()
for k in range(len(temp)):
s = temp[k]
sirketler.append(s)
model = QtGui.QStandardItemModel()
self.ui.tumSirketler.setModel(model)
for i in sirketler:
item = QtGui.QStandardItem(i)
model.appendRow(item)
self.ui.bildirim.setText("Sirket Verileri Cekildi!")
# self.gridLayout.addWidget(self.listView, 1, 0, 1, 2)
def widgetListele(self):
self.ui.sirketler.clear()
df_sirket = pd.read_html('Sirketler/Sirketler.xls')
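# NOTE (assumed reconstruction): the original method is cut off at this point;
# the lines below are a sketch that mirrors listeyeDok, except that they fill
# the editable `sirketler` QListWidget (via addItems) instead of the
# tumSirketler view.
sirketler = []
for i in range(len(df_sirket)):
    temp = df_sirket[i][1][1:].to_list()
    for k in range(len(temp)):
        sirketler.append(temp[k])
self.ui.sirketler.addItems(sirketler)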