prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
"""Core utilities"""
import sys
import logging
import inspect
from functools import singledispatch
from copy import deepcopy
from typing import (
Any,
Callable,
Iterable,
List,
Mapping,
Sequence,
Union,
Tuple,
)
import numpy
from numpy import array as Array
import pandas
from pandas import Categorical, DataFrame, Series
from pipda import register_func
from pipda.symbolic import Reference
from pipda.utils import CallingEnvs
from .exceptions import (
ColumnNotExistingError,
DataUnrecyclable,
NameNonUniqueError,
)
from .contexts import Context
from .types import (
StringOrIter,
Dtype,
is_iterable,
is_scalar,
is_categorical,
is_null,
)
from .defaults import DEFAULT_COLUMN_PREFIX, NA_REPR
# logger
logger = logging.getLogger("datar")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(
logging.Formatter(
"[%(asctime)s][%(name)s][%(levelname)7s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
logger.addHandler(stream_handler)
def vars_select(
all_columns: Iterable[str],
*columns: Any,
raise_nonexists: bool = True,
base0: bool = None,
) -> List[int]:
# TODO: support selecting data-frame columns
"""Select columns
Args:
all_columns: The column pool to select
*columns: arguments to select from the pool
raise_nonexist: Whether raise exception when column not exists
in the pool
base0: Whether indexes are 0-based if columns are selected by indexes.
If not given, will use `datar.base.get_option('index.base.0')`
Returns:
The selected indexes for columns
Raises:
ColumnNotExistingError: When the column does not exist in the pool
and raise_nonexists is True.
"""
from .collections import Collection
from ..base import unique
columns = [
column.name if isinstance(column, Series) else column
for column in columns
]
selected = Collection(*columns, pool=list(all_columns), base0=base0)
if raise_nonexists and selected.unmatched and selected.unmatched != {None}:
raise ColumnNotExistingError(
f"Columns `{selected.unmatched}` do not exist."
)
return unique(selected).astype(int)
def recycle_value(
value: Any, size: int, name: str = None
) -> Union[DataFrame, numpy.ndarray]:
"""Recycle a value based on a dataframe
Args:
value: The value to be recycled
size: The size to recycle to
name: The name to show in the error if failed to recycle
Returns:
The recycled value
"""
# TODO: follow base R's recycling rule? i.e. size 2 -> 4
from ..base import NA
if is_scalar(value):
value = [value]
length = len(value)
if length not in (0, 1, size):
name = "value" if not name else f"`{name}`"
expect = "1" if size == 1 else f"(1, {size})"
raise DataUnrecyclable(
f"Cannot recycle {name} to size {size}, "
f"expect {expect}, got {length}."
)
if isinstance(value, DataFrame):
if length == size == 0:
return DataFrame(columns=value.columns)
if length == 0:
value = DataFrame([[NA] * value.shape[1]], columns=value.columns)
if length == 1 and size > length:
return value.iloc[[0] * size, :].reset_index(drop=True)
return value
cats = categorized(value).categories if is_categorical(value) else None
if length == size == 0:
return [] if cats is None else Categorical([], categories=cats)
if length == 0:
value = [NA]
if isinstance(value, Series):
# try to keep Series class
# some operators can only do with it or with it correctly
# For example:
# Series([True, True]) & Series([False, NA]) -> [False, Fa.se]
# But with numpy.array, it raises error, since NA is a float
if length == 1 and size > length:
value = value.iloc[[0] * size].reset_index(drop=True)
return value
if isinstance(value, tuple):
value = list(value)
# dtype = getattr(value, 'dtype', None)
if length == 1 and size > length:
value = list(value) * size
if cats is not None:
return | Categorical(value, categories=cats) | pandas.Categorical |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.policies.single_policy_functions import _interpolate_activity_level
from src.policies.single_policy_functions import reduce_recurrent_model
from src.policies.single_policy_functions import reduce_work_model
from src.policies.single_policy_functions import reopen_other_model
from src.policies.single_policy_functions import shut_down_model
@pytest.fixture
def fake_states():
states = pd.DataFrame(index=np.arange(10))
states["state"] = ["Bayern", "Berlin"] * 5
# date at which schools are open in Berlin but closed in Bavaria
# date with uneven week number, i.e. where group a attends school
states["date"] = pd.Timestamp("2020-04-23")
states["school_group_a"] = [0, 1] * 5
states["occupation"] = pd.Categorical(
["school"] * 8 + ["preschool_teacher", "school_teacher"]
)
states["educ_worker"] = [False] * 8 + [True] * 2
states["age"] = np.arange(10)
return states
def test_shut_down_model_non_recurrent():
contacts = pd.Series(np.arange(3))
states = pd.DataFrame(index=["a", "b", "c"])
calculated = shut_down_model(states, contacts, 123, is_recurrent=False)
expected = pd.Series(0, index=["a", "b", "c"])
assert_series_equal(calculated, expected)
def test_shut_down_model_recurrent():
contacts = pd.Series(np.arange(3))
states = pd.DataFrame(index=["a", "b", "c"])
calculated = shut_down_model(states, contacts, 123, is_recurrent=True)
expected = pd.Series(False, index=["a", "b", "c"])
assert_series_equal(calculated, expected)
def test_reduce_recurrent_model_set_zero():
states = pd.DataFrame(index=[0, 1, 2, 3])
contacts = pd.Series([True, True, False, False])
calculated = reduce_recurrent_model(states, contacts, 333, multiplier=0.0)
assert (calculated == 0).all()
def test_reduce_recurrent_model_no_change():
states = pd.DataFrame(index=[0, 1, 2, 3])
contacts = pd.Series([True, True, False, False])
calculated = reduce_recurrent_model(states, contacts, 333, multiplier=1.0)
assert np.allclose(contacts, calculated)
def test_reduce_recurrent_model_one_in_four():
n_obs = 10_000
states = pd.DataFrame(index=np.arange(n_obs))
contacts = pd.Series([True, False] * int(n_obs / 2))
calculated = reduce_recurrent_model(
states=states, contacts=contacts, seed=1234, multiplier=0.25
)
# check that we get expected number of contacts right
calculated_mean = calculated.mean()
expected_mean = 0.125
assert np.allclose(calculated_mean, expected_mean, rtol=0.005, atol=0.005)
# check that people who stayed home before policy still stay home
assert not calculated[~contacts].any()
def test_reduce_work_model(fake_states):
fake_states["work_contact_priority"] = np.arange(10)[::-1] / 10
contacts = pd.Series(1, index=fake_states.index)
contacts[2] = 0
calculated = reduce_work_model(
states=fake_states,
contacts=contacts,
seed=123,
attend_multiplier=0.5,
hygiene_multiplier=1.0,
is_recurrent=False,
)
expected = pd.Series(
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], index=fake_states.index, dtype=float
)
assert_series_equal(calculated, expected)
def test_reduce_work_model_with_hygiene_multiplier(fake_states):
fake_states["work_contact_priority"] = np.arange(10)[::-1] / 10
contacts = pd.Series(2, index=fake_states.index)
contacts[2] = 0
contacts[3] = 5
calculated = reduce_work_model(
states=fake_states,
contacts=contacts,
seed=123,
attend_multiplier=0.5,
hygiene_multiplier=0.5,
is_recurrent=False,
)
expected = pd.Series(
[1, 1, 0, 2.5, 0, 0, 0, 0, 0, 0], index=fake_states.index, dtype=float
)
assert_series_equal(calculated, expected)
def test_reduce_work_model_multiplier_series(fake_states):
fake_states["work_contact_priority"] = np.arange(10)[::-1] / 10
contacts = pd.Series(True, index=fake_states.index)
contacts[2] = False
calculated = reduce_work_model(
states=fake_states,
contacts=contacts,
seed=123,
attend_multiplier=pd.Series([0.5], index=[pd.Timestamp("2020-04-23")]),
hygiene_multiplier=1.0,
is_recurrent=True,
)
expected = pd.Series(
[True, True, False, True, False, False, False, False, False, False],
index=fake_states.index,
)
assert_series_equal(calculated, expected)
def test_reduce_work_model_multiplier_frame_missing_state(fake_states):
fake_states["work_contact_priority"] = np.arange(10)[::-1] / 10
fake_states["state"] = ["A", "B"] * 5
contacts = pd.Series(6, index=fake_states.index)
contacts[2] = 0
multiplier = pd.DataFrame(data={"A": [0.5]}, index=[pd.Timestamp("2020-04-23")])
with pytest.raises(AssertionError):
reduce_work_model(
states=fake_states,
contacts=contacts,
seed=123,
attend_multiplier=multiplier,
hygiene_multiplier=1.0,
is_recurrent=False,
)
def test_reduce_work_model_multiplier_frame(fake_states):
fake_states["work_contact_priority"] = np.arange(10)[::-1] / 10
fake_states["state"] = ["A", "B"] * 5
contacts = | pd.Series(1, index=fake_states.index) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 13 22:45:00 2018
@author: benmo
"""
import pandas as pd, numpy as np, dask.dataframe as ddf
import quandl
import sys, os, socket
import pickle
from dask import delayed
from difflib import SequenceMatcher
from matplotlib.dates import bytespdate2num, num2date
from matplotlib.ticker import Formatter
import re
from itertools import permutations, product, chain
from functools import reduce
import struct
similar = lambda a, b: SequenceMatcher(None, a, b).ratio()
crs4326 = {'init': 'epsg:4326'}
def mostSimilar(x,term):
temp = np.array([x,list(map(lambda x: similar(x,term), x))]).T
return pd.DataFrame(temp,
columns=['Name','Score']).sort_values('Score',ascending=False)
def getEconVars():
varz = pd.read_csv("/home/benmo/Data/Econ/Indicators/indicators.csv")
fedData = pickleLib.fedData()
econDict = {}
for col in varz.columns:
temp = varz[col].dropna()
econDict[col] = {}
for var in temp:
econDict[col][var] = mostSimilar(fedData.item, var).iloc[:5].set_index(
'Name').to_dict()
return econDict
#"/home/benmo/Data/PyObjects/commodities.pkl"
def get_commodities():
oil = quandl.get('CHRIS/CME_WS1', authtoken="<KEY>")
natgas = quandl.get('CHRIS/CME_NG1', authtoken="<KEY>")
gold = quandl.get('CHRIS/CME_GC1', authtoken="<KEY>")
rice = quandl.get('CHRIS/ODE_TR1', authtoken="<KEY>")
grain = quandl.get('CHRIS/EUREX_FCGR1', authtoken="<KEY>")
lumber = quandl.get('CHRIS/CME_LB1', authtoken="<KEY>")
steelCHN = quandl.get('CHRIS/SHFE_WR1', authtoken="<KEY>")
steelUSA = quandl.get('CHRIS/CME_HR1', authtoken="<KEY>")
coal = quandl.get('CHRIS/SGX_CFF1', authtoken="<KEY>")
df = pd.DataFrame([])
for (key, temp) in zip(['Oil', 'Natural Gas', 'Gold', 'Rice', 'Grain',
'Lumber', 'SteelCHN', 'SteelUSA', 'Coal'], [oil, natgas, gold, rice,
grain, lumber, steelCHN,
steelUSA, coal]):
temp['Commodity'] = key
df = df.append(temp)
return df
def get_etfs():
oil = quandl.get('CHRIS/CME_WS1', authtoken="<KEY>")
natgas = quandl.get('CHRIS/CME_NG1', authtoken="<KEY>")
gold = quandl.get('CHRIS/CME_GC1', authtoken="<KEY>")
rice = quandl.get('CHRIS/ODE_TR1', authtoken="<KEY>")
grain = quandl.get('CHRIS/EUREX_FCGR1', authtoken="<KEY>")
lumber = quandl.get('CHRIS/CME_LB1', authtoken="<KEY>")
steelCHN = quandl.get('CHRIS/SHFE_WR1', authtoken="<KEY>")
steelUSA = quandl.get('CHRIS/CME_HR1', authtoken="<KEY>")
coal = quandl.get('CHRIS/SGX_CFF1', authtoken="<KEY>")
df = pd.DataFrame([])
for (key, temp) in zip(['Oil', 'Natural Gas', 'Gold', 'Rice', 'Grain',
'Lumber', 'SteelCHN', 'SteelUSA', 'Coal'], [oil, natgas, gold, rice,
grain, lumber, steelCHN,
steelUSA, coal]):
temp['Commodity'] = key
df = df.append(temp)
return df
def print_lines(fn, N, out=None):
fout=open(out, 'w+') if out == None else None
f=open(fn)
for i in range(N):
line=f.readline()
print(line) if out == None else fout.write(line)
f.close()
fout.close() if out == None else print('no file written')
tuple2str = lambda name: name if isinstance(name, tuple) ==False else reduce(lambda x, y: str(x)
.replace('.0','') + '_' + str(y).replace('.0',''), list(map(lambda xi: str(xi), name)))
def search_str(regx, string):
return True if re.search(regx, string) else False
def returnFiltered(term, data):
temp = list(filter(lambda x: term
in x.lower(), data['item']))
return data[data.isin(temp).item==True]
def egen(data, f, applyto, groupby, column_filt, newcol):
tmp = data[column_filt]
tmp[newcol] = tmp.groupby(groupby).apply(f)
tmp['index'] = tmp.index
return pd.merge(data, tmp, how='inner', left_on=column_filt, right_on =applyto + ['index'])
def read_idx(filename):
with open(filename, 'rb') as f:
zero, data_type, dims = struct.unpack('>HBB', f.read(4))
shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
return np.fromstring(f.read(), dtype=np.uint8).reshape(shape)
class MyComp():
cName = socket.gethostname()
if sys.platform == 'linux':
ffProfilePath = "/home/benmo/.mozilla/firefox/w55ako72.dev-edition-default"
picklePath = "/home/benmo/Data/PyObjects"
else:
if cName == 'DESKTOP-HOKP1GT':
ffProfilePath = "C:/Users/benmo/AppData/Roaming/Mozilla/Firefox/Profiles/it0uu1ch.default"
uofcPath = "D:/OneDrive - University of Calgary"
financePath = "C:/users/benmo/OneDrive/2016& 2017Classes/Financial Econ"
picklePath = "D:/data/pyobjects"
classesPath = "C:/users/benmo/OneDrive/2016& 2017Classes"
else:
ffProfilePath = "C:/Users/benmo/AppData/Roaming/Mozilla/Firefox/Profiles/vpv78y9i.default"
uofcPath = "D:/benmo/OneDrive - University of Calgary"
financePath = "D:/benmo/OneDrive/2016& 2017Classes/Financial Econ"
picklePath = "D:/data/pyobjects"
classesPath = "D:/benmo/OneDrive/2016& 2017Classes"
def mySAS():
bob = pd.read_sas("D:/data/Personal Research/pcg15Public/pcg15Public/epcg15.sas7bdat")
return bob
def collect_csv(path, na_val='NA',skiprows=0, dtype_map=None):
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, skiprows=skiprows, dtype=dtype_map)))
except:
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, skiprows=skiprows, dtype=dtype_map)))
except:
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, dtype=str,
skiprows=skiprows)))
except:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, dtype=str,
na_values=na_val, skiprows=skiprows)))
'''example:
bob = ddf.read_csv('Z:/Electricity/*.csv',skiprows=2,dtype={'Date': str,
'HE': str,
'Import/Export': str,
'Asset Id': str,
'Block Number': str,
'Price': 'float64',
'From': 'int64',
'To': 'int64',
'Size': 'int64',
'Available': 'int64',
'Dispatched': str,
'Dispatched MW': 'int64',
'Flexible': str,
'Offer Control': str})
bob=bob.drop('Effective Date/Time',axis=1)
bob.compute().to_csv('Z:/Electricity/Combined.csv',index=False)
'''
def nestmap(outer, outerf, innerf, mapping=list):
return map(lambda x: outerf(mapping(map(lambda inner: innerf(inner), x))), outer)
def test():
bob = pd.read_csv("C:/users/benmo/desktop/fedReserve.csv")
list(filter(lambda x: 'utl' in x.lower(), bob['item']))
data = quandl.get('FED/DTCOLRHTS_N_M', authtoken="<KEY>")
class pickleLib:
picklePath = MyComp.picklePath
pathStates = picklePath + '/usaStates.pkl'
pathCensus = "D:/Data/Personal Research/usadata.dta"
states = lambda pth=pathStates: pd.read_pickle(pth)
priceData = lambda pth=picklePath + "/priceData.pkl": pd.read_pickle(pth)
fedData = lambda pth=picklePath + "/fedData.pkl": pd.read_pickle(pth)
futuresData = lambda pth=picklePath + "/futuresData.pkl": pd.read_pickle(pth)
treasuryData = lambda pth=picklePath + "/treasuryData.pkl": pd.read_pickle(pth)
globalYieldData = lambda pth=picklePath + "/globalYield.pkl": pd.read_pickle(pth)
pwt9 = lambda pth=picklePath + "/pwt9.pkl": pd.read_pickle(pth)
aesoMO = lambda pth=picklePath + "/aesoMeritOrder.pkl": pd.read_pickle(pth)
aesoPool = lambda pth=picklePath + "/aesoPool.pkl": pd.read_pickle(pth)
aesodata = lambda pth=picklePath + "/aesodata.pkl": pd.read_pickle(pth)
aesoforecast = lambda pth=picklePath + "/aesoforecast.pkl": | pd.read_pickle(pth) | pandas.read_pickle |
import numpy as np
import pandas as pd
def get_weights_ffd(d, thres, max_size=10_000):
"""
Snippet 5.3 (page 83) The New Fixed-Width Window FracDiff Method
:param d: int
:param thres: float
:param max_size: int, Set the maximum size for stability
:return:
"""
w = [1.]
for k in range(1, max_size):
w_ = -w[-1] / k * (d - k + 1)
if abs(w_) <= thres:
break
w.append(w_)
w = np.array(w)
return w
def frac_diff_ffd(series, d, lag=1, thres=1e-5, max_size=10_000):
"""
Snippet 5.3 (page 83) The New Fixed-Width Window FracDiff Method
Compute Fractional Differentiation
:param series:
:param d:
:param lag:
:param thres:
:param max_size:
:return:
"""
max_size = int(max_size / lag)
w = get_weights_ffd(d, thres, max_size)
width = len(w)
series_ = series.fillna(method='ffill').dropna()
rolling_array = []
for i in range(width):
rolling_array.append(series_.shift(i * lag).values)
rolling_array = np.array(rolling_array)
series_val = np.dot(rolling_array.T, w)
series_val = series_val.reshape(-1, )
timestamps = series.index[-len(series_val):]
series = | pd.Series(series_val, index=timestamps) | pandas.Series |
import mysql.connector
from pandas.io import sql
import praw
from praw.models import MoreComments
import pandas as pd
import finnhub
import yaml
import datetime
import time
from tqdm import tqdm
class Database:
"""
Class for connecting to and manipulating relational database
"""
def __init__(self):
# Open yml to get connectivity info
with open("IDs.yml") as file:
self.info = yaml.load(file, Loader=yaml.FullLoader)
self.ENDPOINT = self.info['MySQL']['ENDPOINT']
self.PORT = self.info['MySQL']['PORT']
self.REGION = self.info['MySQL']['REGION']
self.USR = self.info['MySQL']['USR']
self.DBNAME = self.info['MySQL']['DBNAME']
self.PASSWORD = self.info['MySQL']['master_password']
# Connect to database
try:
self.conn = mysql.connector.connect(host=self.ENDPOINT, user=self.USR, passwd=self.PASSWORD)#, database=self.DBNAME)
print('connection established')
self.cur = self.conn.cursor()
self.cur.execute("""SELECT now()""")
query_results = self.cur.fetchall()
print(query_results)
except Exception as e:
print("Database connection failed due to {}".format(e))
def initialize_database(self):
"""
Method initializing database
"""
sql1 = '''CREATE DATABASE DB1'''
sql2 = '''USE DB1'''
self.cur.execute(sql1)
self.cur.execute(sql2)
return
def use_database(self, database_name):
"""
Method determining what database to use
Args:
database_name (str): Name of database to be used
"""
sql = '''USE {}'''.format(database_name)
self.cur.execute(sql)
def initialize_tables(self):
"""
Method initializing tables
"""
sql1 = '''CREATE TABLE POSTS(
POST_ID CHAR(20) PRIMARY KEY NOT NULL,
STOCK_ID CHAR(20) NOT NULL,
TITLE CHAR(100),
SCORE INT,
SUBREDDIT CHAR(20),
URL CHAR(50),
NUM_COMMENTS INT,
BODY TEXT,
TARGET INT,
CREATED FLOAT,
LAST_SCRAPED DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)'''
sql2 = '''CREATE TABLE COMMENTS(
COMMENT_ID CHAR(20) PRIMARY KEY NOT NULL,
POST_ID CHAR(20) NOT NULL,
STOCK_ID CHAR(20) NOT NULL,
TARGET INT,
COMMENT TEXT,
LAST_SCRAPED DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)'''
sql3 = '''CREATE TABLE STOCKS(
STOCK_ID CHAR(20) PRIMARY KEY NOT NULL,
LAST_SCRAPED DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)'''
self.cur.execute(sql1)
self.cur.execute(sql2)
self.cur.execute(sql3)
return
def insert(self, table, data):
"""
Method for inserting row into table
Args:
table (str): Table to be inserted into
data (list): List of data to be inserted into table
"""
if table == 'POSTS':
sql = '''INSERT INTO POSTS (POST_ID, STOCK_ID, TITLE, SCORE, SUBREDDIT, URL, NUM_COMMENTS, BODY, TARGET, CREATED)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);'''
self.cur.execute(sql, (data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9]))
self.conn.commit()
return
elif table == 'COMMENTS':
sql = '''INSERT INTO COMMENTS(COMMENT_ID, POST_ID, STOCK_ID, TARGET, COMMENT)
VALUES (%s, %s, %s, %s, %s);'''
self.cur.execute(sql, (data[0], data[1], data[2], data[3], data[4]))
self.conn.commit()
return
elif table == 'STOCKS':
# sql = '''INSERT INTO STOCKS(STOCK_ID, LAST_SCRAPED)
# VALUES (%s, NOW());'''
sql = '''INSERT INTO STOCKS(STOCK_ID)
VALUES('{}');'''.format(data)
self.cur.execute(sql)
self.conn.commit()
return
else:
raise Exception("Only 'POSTS', 'COMMENTS' or 'STOCKS' valid arguments for 'table'")
def label(self, table_name, ID, label):
"""
Method for labeling unlabeled posts and comments
Args:
table_name (str): Name of desired table
ID (str): ID of post or comment to be labeled
label (int): label of post or comment (either 1 or 0)
"""
if table_name=='POSTS':
sql = "UPDATE POSTS SET TARGET={} WHERE POST_ID='{}';".format(label, ID)
elif table_name=='COMMENTS':
sql = "UPDATE COMMENTS SET TARGET={} WHERE COMMENT_ID='{}';".format(label, ID)
else:
raise Exception("Only 'POSTS' or 'COMMENTS' valid arguments for 'table_name'")
self.cur.execute(sql)
self.conn.commit()
return
def update_last_scraped(self, stock_id):
"""
Method to update Stock table to show the last time the stock was scraped
Args:
stock_id (str): Id of stock to update
"""
sql = "UPDATE STOCKS SET LAST_SCRAPED=NOW() WHERE STOCK_ID='{}';".format(stock_id)
self.cur.execute(sql)
self.conn.commit()
return
def query(self, sql):
"""
Method ot query database
Args:
sql (str): SQL Statement
"""
self.cur.execute(sql)
return self.cur.fetchall()
def drop_table(self, table_name):
"""
Method to drop table from database
Args:
table_name (str): Name of table to be dropped
"""
sql = '''DROP TABLE {} ;'''.format(table_name)
self.cur.execute(sql)
return
def get_keys(website):
"""
Helper fucntion to get passwords for external APIs
Args:
website (str): name of website
"""
with open("IDs.yml") as file:
IDs = yaml.load(file, Loader=yaml.FullLoader)
if website=="Reddit":
client_id = IDs['Reddit']['client_id']
client_secret = IDs['Reddit']['client_secret']
return client_id, client_secret
if website=="Finnhub":
api_key = IDs["Finnhub"]["api_key"]
return api_key
class ScrapeWSB:
"""
Class to scrape r/wallstreetbets
"""
def __init__(self, stock_name, num_posts, num_comments, sort_type="hot", time_filter="day"):
"""
Args:
stock_name (str): Name of stock to be scraped
num_posts (int): Number of posts to be scraped
num_comments (int): Number of comments to be scraped
sort_type (str): Way to sort top posts ("hot", etc) <--- FILL IN LATER
time_filter(str): Time period from which to scrape posts ("day", "week", "month")
"""
self.stock_name = stock_name
self.num_posts = num_posts
self.num_comments = num_comments
self.sort_type=sort_type
self.time_filter = time_filter
# Create "reddit" object
#self.reddit = praw.Reddit(client_id=self.client_id, client_secret=self.client_secret, user_agent='WebScraping')
def scrape(self):
#Blank list for hottest posts and their attributes
#posts = []
# obtain most recent posts from wallstreetbets with regard to GME
self.client_id, self.client_secret = get_keys("Reddit")
self.reddit = praw.Reddit(client_id=self.client_id, client_secret=self.client_secret, user_agent='WebScraping')
queried_posts = self.reddit.subreddit('wallstreetbets').search(self.stock_name,
self.sort_type,
self.time_filter,
limit=self.num_posts)
# Loop through 10 GME posts and print title
db = Database()
db.use_database('DB1')
post_id_list = []
for post in queried_posts:
# append post attributes to list
# posts.append([post.id, self.stock_name, post.title, post.score, post.subreddit, post.url, post.num_comments,
# post.selftext, post.created])
if len(db.query("SELECT * FROM POSTS WHERE POST_ID='{}';".format(post.id)))==0:
db.insert('POSTS', [post.id, self.stock_name, post.title, post.score, str(post.subreddit), post.url, post.num_comments,
post.selftext, -1, post.created])
post_id_list.append(post.id)
# Create Dataframe for top 10 hottest posts
# posts = pd.DataFrame(posts,columns=['post_id', 'stock_id', 'title', 'score', 'subreddit', 'url', 'num_comments', 'body', 'created'])
return post_id_list
def convert(self, df, training=False):
# Initialize dictionary
stock = []
db = Database()
db.use_database('DB1')
# Loop through all top posts
for i in tqdm(range(len(df))):
# Extract ID
ID = df[i]
#ID = df.post_id[i]
# Create submission object to extract comments for each post
submission = self.reddit.submission(id = ID)
submission.comments.replace_more(limit=0)
# Initialize list for commments
#comments = []
count = 0
# Loop through comments
for top_level_comment in submission.comments:
# append comments to list
if count<self.num_comments:
#comments.append(top_level_comment.body)
if len(db.query("SELECT * FROM COMMENTS WHERE COMMENT_ID='{}';".format(top_level_comment.id)))==0:
db.insert('COMMENTS', [top_level_comment.id, ID, self.stock_name, -1, top_level_comment.body])
else:
break
count+=1
if len(db.query("SELECT * FROM STOCKS WHERE STOCK_ID='{}';".format(self.stock_name)))==0:
db.insert('STOCKS', self.stock_name)
else:
db.update_last_scraped(self.stock_name)
return
def process(self):
self.convert(self.scrape())
return
class Stock:
"""
Class for pulling recent stock values
"""
def __init__(self):
self.start = int(time.mktime((datetime.datetime.now()- datetime.timedelta(days=1)).timetuple()))
self.end = int(time.time())
def set_start(self, date):
"""
Args:
date (list): [year (int), month (int), day (int), hour (int), minute (int), seconds (int)]
"""
self.start = self.create_unix_stamp(date[0], date[1], date[2], date[3], date[4], date[5])
def set_end(self, current=True, date=None):
"""
Args:
current (bool): if True, end is current date and time; if False must set desired end date and time
date (list): [year (int), month (int), day (int), hour (int), minute (int), seconds (int)]
"""
if current:
self.end = int(time.time())
else:
self.end = self.create_unix_stamp(date[0], date[1], date[2], date[3], date[4], date[5])
# Create unix timestamp
def create_unix_stamp(self, year, month, day, hour, minute, second):
"""
Args:
year (int): year
month (int): month
day (int): day
hour (int): hour
minute (int): minute
second (int): second
Method for creating unix time stamp from list
"""
dt = datetime.datetime(year, month, day, hour, minute, second)
return int(time.mktime(dt.timetuple()))
def convert(self, df):
"""
Args:
df (pd.DataFrame): dataframe to be converted
Method for converting dataframe to dictionary
"""
prices = {}
prices['_id'] = 0
prices['open'] = list(df.o)
prices['high'] = list(df.h)
prices['low'] = list(df.l)
prices['close'] = list(df.c)
prices['volume'] = list(df.v)
prices['timestamp'] = list(df.t)
prices['status'] = list(df.s)
return prices
def lag(self, feature, df):
df[feature+"_t1"] = [df.loc[df.timestamp.dt.day==i.day-1,['timestamp', feature]][feature].mean() for i in tqdm(df.timestamp)]
def pull_data(self, stock_name):
"""
Args:
stock_name (str): Name of stock for which to pull data
Method for scraping stock data
"""
self.api_key = get_keys("Finnhub")
self.finnhub_client = finnhub.Client(api_key=self.api_key)
res = self.finnhub_client.stock_candles(stock_name, '1', self.start, self.end)
df = | pd.DataFrame(res) | pandas.DataFrame |
import multiprocessing
import socket
from struct import unpack
from copy import deepcopy
from time import sleep
import time
import pandas as pd
import LIGHT
class UDPSocket(multiprocessing.Process):
def __init__(self, mem, IP, Port, shut_up = False):
multiprocessing.Process.__init__(self)
self.address = (IP, Port)
self.mem = mem
self.trigger_mem = mem[-1] # Clean mem -> 'Normal':True
# self.TSMS_mem = mem[-3]
self.old_CNS_data = deepcopy(self.mem[0]) # main mem copy
self.read_CNS_data = deepcopy(self.mem[0])
self.shut_up = shut_up
self.initial_save = False # ์ด๊ธฐ ์ํ๊ฐ ํธ์ถ๋ ๊ฒฝ์ฐ 1๋ฒ๋ง ์ ์ฅํ๋ ๊ธฐ๋ฅ
now = time.localtime()
s = "%02d-%02d_%02d_%02d_%02d_%02d" % (now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_min,
now.tm_sec)
self.save_file_name = '{}.csv'.format(s)
# ์ผ์ ์๊ฐ๋ง๋ค ๋ฐ์ดํฐ ์ ์ฅ ์ฉ ํ์ด๋จธ
self.history_min = time.localtime().tm_min
def run(self):
udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpSocket.bind(self.address)
udpSocket.settimeout(5) # 5์ด ๋๊ธฐ ํ ์ฐ๊ฒฐ ์์ผ๋ฉด ์ฐ๊ฒฐ ์๋จ์ ํธ์ถ
# data, client = udpSocket.recvfrom(44388) # ์ต๋ ๋ฒํผ ์
while True:
# try:
data, client = udpSocket.recvfrom(44388)
# print(len(data))
pid_list = self.update_mem(data[8:]) # ์ฃผ์๊ฐ์ ๊ฐ์ง๋ 8๋ฐ์ดํธ๋ฅผ ์ ์ธํ ๋๋จธ์ง ๋ถ๋ถ
# ์ฝ๋๊ฐ ์ฒ์ ๋๋๋ง ๋์ ----------------------
if self.old_CNS_data['KCNTOMS']['L'] == []:
if not self.shut_up: print('List mem empty')
self.update_old_CNS_data()
# ----------------------------------------------
if self.read_CNS_data['KCNTOMS']['V'] == 0:
if self.old_CNS_data['KCNTOMS']['L'][-1] != self.read_CNS_data['KCNTOMS']['V']:
self.mem[-1]['Clean'] = True
if not self.shut_up: print(self, 'Memory clear')
dumy = deepcopy(self.mem[0])
for dumy_key in dumy.keys():
dumy[dumy_key]['L'].clear()
dumy[dumy_key]['D'].clear()
for _ in dumy.keys():
self.mem[0][_] = dumy[_]
self.old_CNS_data = deepcopy(self.mem[0]) # main mem copy
self.read_CNS_data = deepcopy(self.mem[0])
for __ in self.old_CNS_data.keys():
self.mem[0][__] = self.old_CNS_data[__]
else:
if not self.shut_up: print(self, 'initial stedy')
# ์ด๊ธฐ ์ํ์ธ ๊ฒฝ์ฐ ์์ง๋ ๋ฐ์ดํฐ๋ฅผ ์ ์ฅํ๊ธฐ์ํ ํ์ผ ์ด๋ฆ์ ์ ์ -------
now = time.localtime()
s = "%02d-%02d_%02d_%02d_%02d_%02d" % (now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_min,
now.tm_sec)
self.save_file_name = '{}.csv'.format(s)
self.initial_save = False # ์ด๊ธฐ ์ํ๊ฐ ํธ์ถ๋ ๊ฒฝ์ฐ 1๋ฒ๋ง ์ ์ฅํ๋ ๊ธฐ๋ฅ
# ---------------------------------------------------------
for __ in self.old_CNS_data.keys():
self.old_CNS_data[__]['V'] = self.read_CNS_data[__]['V'] # ์ด๊ธฐ ์ํ์ฌ๋ V๋ ์
๋ฐ์ดํธ
self.mem[0][__] = self.old_CNS_data[__]
else: # not 0
if self.old_CNS_data['KCNTOMS']['D'][-1] != self.read_CNS_data['KCNTOMS']['V']:
if not self.shut_up: print(self, 'run CNS')
self.initial_save = False # run์์ stop์ผ๋ก ์ํ๊ฐ ๋ฐ๋๋ ๋ฐ์ดํฐ ์ ์ฅ์ ์ํด์ ์ฌ์ฉ
# ์ผ์ ์๊ฐ๋ง๋ค ๋ฐ์ดํฐ ์ ์ฅํ๋ ํํธ -------
if self.history_min != time.localtime().tm_min:
self.save_file(self.save_file_name)
self.history_min = time.localtime().tm_min #timer ์
๋ฐ์ดํฐ
# ------------------------------------------
self.update_old_CNS_data()
for __ in self.old_CNS_data.keys():
self.mem[0][__] = self.old_CNS_data[__]
else:
if not self.shut_up: print(self, 'stop CNS')
self.save_file(self.save_file_name)
pass
# except Exception as e:
# print(e)
def update_mem(self, data):
pid_list = []
# print(len(data)) data์ 8๋ฐ์ดํธ๋ฅผ ์ ์ธํ ๋๋จธ์ง ๋ฒํผ์ ํฌ๊ธฐ
for i in range(0, len(data), 20):
sig = unpack('h', data[16 + i: 18 + i])[0]
para = '12sihh' if sig == 0 else '12sfhh'
pid, val, sig, idx = unpack(para, data[i:20 + i])
pid = pid.decode().rstrip('\x00') # remove '\x00'
if pid != '':
self.read_CNS_data[pid] = {'V': val, 'type': sig}
# self.read_CNS_data[pid] = {'V': val, 'type': sig, 'N_V': val}
pid_list.append(pid)
return pid_list
def append_value_to_old_CNS_data(self, key, value):
self.old_CNS_data[key]['V'] = value
if LIGHT.LIGHT:
if len(self.old_CNS_data[key]['L']) > 0:
pass
else:
self.old_CNS_data[key]['L'].append(value)
else:
self.old_CNS_data[key]['L'].append(value)
self.old_CNS_data[key]['D'].append(value)
def update_other_state_to_old_CNS_data(self, para, value):
for length in range(0, len(para)):
self.append_value_to_old_CNS_data(key=para[length], value=value[length])
def update_old_CNS_data(self):
for _ in self.read_CNS_data.keys():
self.append_value_to_old_CNS_data(key=_, value=self.read_CNS_data[_]['V'])
if _ == 'KFZRUN': break # CNS์์ ์ ๊ณตํ๋ DB๊น์ง ์ฝ๊ณ ๋๋จธ์ง๋ ์๋์์ ์๋์ผ๋ก ์
๋ฐ์ดํธ
# ์ ์ ์ํ ๋ผ๋ฒจ๋ง ์
๋ฐ์ดํธ
temp_list = [1, 0] if self.trigger_mem['Normal'] else [0, 1]
self.update_other_state_to_old_CNS_data(['Normal_0', 'Normal_1'], temp_list)
# ๋น์ ์ํ ๋ผ๋ฒจ๋ง ์
๋ฐ์ดํธ
temp_list = [0, 0, 0, 0, 0, 0, 0]
temp_list[self.trigger_mem['Accident_nb']] = 1
self.update_other_state_to_old_CNS_data(['Accident_0', 'Accident_1', 'Accident_2', 'Accident_3',
'Accident_4', 'Accident_5'], temp_list)
def save_file(self, file_name):
if not self.initial_save:
if not self.shut_up: print(self, 'CNS file save')
temp = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: mmahmed
"""
################################
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import matplotlib
import deepchem
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
from rdkit.Chem import Descriptors
from deepchem.feat import Featurizer
import pickle
from keras.models import model_from_json
import os
import sys
######
from rdkit import Chem,rdBase
from rdkit.Chem import PandasTools
import keras
import keras.utils
from datetime import date
today = date.today()
import argparse
###################################################################################################
def input_file(path):
"""Check if input file exists."""
path = os.path.abspath(path)
if not os.path.exists(path):
raise IOError('File %s does not exist.' % path)
return path
def output_file(path):
"""Check if output file can be created."""
path = os.path.abspath(path)
dirname = os.path.dirname(path)
if not os.access(dirname, os.W_OK):
raise IOError('File %s cannot be created (check your permissions).'
% path)
return path
def string_bool(s):
s = s.lower()
if s in ['true', 't', '1', 'yes', 'y','True']:
return True
elif s in ['false', 'f', '0', 'no', 'n', 'False']:
return False
else:
raise IOError('%s cannot be interpreted as a boolean' % s)
################################################################################################
os.chdir('.')
##It is important to know which pandas versions you are using in order to avoid syntax mismatch.
##The script should work fine with pandas versions 0.22 & 0.23 & ..
print ('PANDAS version ',pd.__version__)
print('########################')
print(''' The following python librares are required to run the models:
* Python 2.7 (tested with Anaconda distribution, Linux OS (Mint 19.1 for a local PC, and 3.10.0-957.12.2.el7.x86_64 GNU/Linux for ComputeCanada clusters). Running the models on MAC may be cumbersome because of the recent XGBoost updates. We did not test the prediction on Windows.)
* DeepChem 1.x (Require RDKit)
* Pandas (Prediction is tested with Pandas 0.22)
* Tensorflow 1.3
* Keras
* XGBoost
* ScikitLearn
''')
print('########################')
print('''In order for the script to run, and in addition to the input file (see below), the following files should exist in the running directory:
* dl_model_fp.json
* dl_model_fp.h5
* mlp_rdkit_classify_fp.sav
* xgb_rdkit_classify_fp.sav
* rfc_rdkit_classify_fp.sav
* svm_rdkit_classify_fp.sav
* coded_gpcr_list.csv
NB: The rfc_rdkit_classify_fp.sav & svm_rdkit_classify_fp.sav & mlp_rdkit_classify_fp.sav models are required only if the [--ignore_rf_svm argument] option in the script is set to False (True is the default behaviour)\n. The models are not deposited in the github repository because of size limits, to get these two models a direct request should be sent to <EMAIL> & <EMAIL>
''')
print('########################')
print ('Welcome to GPCR_LigandClassify, this is how you can use the program using the models to make novel predictions, we hope you find these predictions useful for your task: \n\
python GPCR_LigandClassify.py --input_file input.csv --output_file output.csv [--n_rows_to_read <INTEGER>] [--mwt_lower_bound <FLOAT>] [--mwt_upper_bound <FLOAT>] [--logp_lower_bound <FLOAT>] [--logp_upper_bound <FLOAT>] [--ignore_rf_svm <True/False>]')
print('########################')
print('The input & output file names arguments are mandatory arguments, --n_rows_to_read argument determines how many rows you want to read from the input CSV files (default 9999999999 rows)\n, the rest are optional with default same as input dataset used for models training.')
print('########################')
print('The --ignore_rf_svm argument will ignore the RF and the SVM models which are pretty large, suitable in case of limited computational resourcses, particularly memory. Default is True (Ignore Randomforests and SVM models.)')
print('########################')
print('Please note that a today date string will be attached to the output file name.')
print('########################')
print('Please note that the script will only save ligands where all models predictions agree.')
print('########################')
print('For the input file, please keep the same format as the attached sample input file. In case of data coming from different source, you can populate the rest of columns with fake data.\nWith the exception of the SMILES column, other columns may be left blank (not recommended).')
print('########################')
print('For the models and auxiliary files, please visit the following github repository:\n\
https://github.com/mmagithub/GPCR_LigandClassify')
print('########################')
##############Read Input/output file names#####################################
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', help = "input filename", type = input_file)
parser.add_argument('--output_file', help = "output filename", type = output_file)
parser.add_argument('--n_rows_to_read', help = "Number of rows to read from the input structures file", default=9999999999,type=int)
parser.add_argument('--mwt_lower_bound', help = "Lowest molecular weight to consider", default=100,type=float)
parser.add_argument('--mwt_upper_bound', help = "Highest molecular weight to consider", default=900,type=float)
parser.add_argument('--logp_lower_bound', help = "Lowest LogP to consider", default=-4,type=float)
parser.add_argument('--logp_upper_bound', help = "Highest LogP to consider", default=10,type=float)
parser.add_argument('--ignore_rf_svm', help = "Ignore RF and SVM models, suitable for small computational resources, default is True", default=True,type=string_bool)
args = parser.parse_args()
print('inputfile:', args.input_file)
OUTPUT_FILE = args.output_file.split('.')[0] + "_{0}.csv".format(today)
print('outputfile:', "{0}".format(OUTPUT_FILE))
STRUCTURES_FILE = args.input_file
###############################################################################
#Here add some constants
#STRUCTURES_FILE = r"drug_bank_structures.csv"
ROWS_TO_READ = args.n_rows_to_read
###############################################################################
MWT_LOWER_BOUND = args.mwt_lower_bound
MWT_UPPER_BOUND = args.mwt_upper_bound
LOGP_LOWER_BOUND = args.logp_lower_bound
LOGP_UPPER_BOUND = args.logp_upper_bound
###############################################################################
###############Adding Descriptors And FingerPrints#############################
########[1] Adding FingerPrints################
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw, PyMol, rdFMCS
from rdkit.Chem.Draw import IPythonConsole
from rdkit import rdBase
####Finger Prints Based Predications####################
####Create FingerPrints#################################
from rdkit.Chem import AllChem
from rdkit import DataStructs
class FP:
def __init__(self, fp):
self.fp = fp
def __str__(self):
return self.fp.__str__()
def computeFP(x):
#compute depth-2 morgan fingerprint hashed to 1024 bits
fp = AllChem.GetMorganFingerprintAsBitVect(x,2,nBits=1024)
res = np.zeros(len(fp),np.int32)
#convert the fingerprint to a numpy array and wrap it into the dummy container
DataStructs.ConvertToNumpyArray(fp,res)
return FP(res)
###############################################################################
def addExplFP(df,molColumn):
fpCache = []
for mol in df[molColumn]:
res = AllChem.GetMorganFingerprintAsBitVect(mol,4,nBits=2048)
fpCache.append(res)
arr = np.empty((len(df),), dtype=np.object)
arr[:]=fpCache
S = | pd.Series(arr,index=df.index,name='explFP') | pandas.Series |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# -- coding: utf-8 --'
import pandas as pd
import numpy as np
import os
import textwrap
import string
import unicodedata
import sys
import sqlite3
import easygui
import re
import copy
import json
import xlsxwriter
# import pyanx
MAX_TAM_LABEL = 100 # nro mรกximo de caracteres nos labels
PALETA = {'vermelho':'#e82f4c', 'laranja':'#ea7e16', 'amarelo':'#f5d516', 'verde': '14bd11', 'azul':'#0b67d0', 'roxo':'#6460aa'}
PALE_TAB = {
'laranja' :['#FF6D00','#FF9800','#FFB74D','#FFECB3'],
'verde' :['#00C853','#8BC34A','#AED581','#DCEDC8'],
'azul' :['#2962FF','#2196F3','#64B5F6','#BBDEFB'],
'rosa' :['#7B1FA2','#9C27B0','#BA68C8','#E1BEE7'],
'ciano' :['#00B8D4','#00BCD4','#4DD0E1','#B2EBF2'],
'roxo' :['#6200EA','#673AB7','#9575CD','#D1C4E9'],
'amarelo' :['#FFD600','#FFEB3B','#FFF176','#FFF9C4'],
'vermelho':['#d50000','#f44336','#e57373','#ffcdd2'],
'marrom' :['#5D4037','#795548','#A1887F','#D7CCC8'],
'cinza' :['#455A64','#607D8B','#90A4AE','#CFD8DC']
}
PALE_TAB_CORES = [cor for cor in PALE_TAB.keys()]
TAM_PALETA_CORES = len(PALE_TAB_CORES) - 3 # ultimos 3 sao reservados
def definir_cor(nro: int) -> str:
nro_cor = nro % (len(PALE_TAB) - 3) # 'vermelho, marrom e cinza sรฃo reservados
return (PALE_TAB_CORES[nro_cor])
class estrutura: # especificaรงรตes das planilhas
def __init__(self, nome="", estr=[], pasta="./"):
self.nome = nome
self.estr = estr
self.pasta = pasta
self.nome_rif = ''
def mudar_pasta(self, pasta):
self.pasta = pasta
def xlsx(self):
return self.nome + ".xlsx"
def csv(self):
return 'RIF'+self.nome_rif+'_'+self.nome + ".csv"
def estr_upper(self):
result = []
for elem in self.estr:
result.append(elem.upper())
return result
def nomearq(self):
return os.path.join(self.pasta, self.xlsx())
def nomearqcsv(self):
return os.path.join(self.pasta, self.csv())
def arquivo_existe(self):
if (
self.nome.upper() == "grupos".upper()
or self.nome.upper() == "vinculos".upper()
): # um novo รฉ criado vazio, uma vez que nรฃo vem do COAF
return True
else:
return os.path.isfile(self.nomearq())
def estr_compativel(self, outra_estr=[]):
ok = all(elem.upper() in self.estr_upper() for elem in outra_estr)
if not ok:
print(self.estr)
print(outra_estr)
return ok
def exibir(self):
strestr = ",".join(self.estr)
return self.nome + ": " + strestr
def csv2xlsx(self):
nomecsv = self.nomearqcsv()
df = pd.read_csv(nomecsv, sep=';', header=0, dtype=str, encoding='latin1',index_col=False )
try:
df.to_excel(self.nomearq(),index=False)
except:
print('Erro gerando XLSX de entrada')
def help_estruturas(estruturas):
print("Estruturas esperadas das planilhas:")
for e in estruturas:
print(" " + e.exibir())
class log:
def __init__(self):
self.logs = u""
def gravalog(self, linha):
print(linha)
self.logs += linha + "\n"
def lelog(self):
return self.logs
class nodo:
def __init__(self, id, label, tipo="ENT", tooltip="", fonte="RIF"):
self.id = id
self.tipo = tipo
self.label = label
self.cor = "Silver"
self.sexo = 0
self.m1 = 0
self.m2 = 0
self.situacao = ""
self.dataOperacao = ""
self.texto_tooltip = tooltip
self.fonte = fonte
self.camada = 5 if self.fonte == "RIF" else 5
def todict(self):
return {
"id": self.id,
"tipo": self.tipo,
"sexo": self.sexo,
"label": self.label,
"camada": self.camada,
"situacao": self.situacao,
"cor": self.cor,
"texto_tooltip": self.texto_tooltip,
"m1": self.m1,
"m2": self.m2,
"m3": 0,
"m4": 0,
"m5": 0,
"m6": 0,
"m7": 0,
"m8": 0,
"m9": 0,
"m10": 0,
"m11": 0,
"dataoperacao": self.dataOperacao,
}
class noPF(nodo):
def __init__(self, id, label="", cor="Silver", sexo=0, fonte="RIF"):
nodo.__init__(self, id, label, "PF")
self.cor = cor
self.sexo = sexo
def todict(self):
return nodo.todict(self)
class noPJ(nodo):
def __init__(self, id, label="", cor="Silver", fonte="RIF"):
nodo.__init__(self, id, label, "PJ")
self.cor = cor
self.sexo = 1
class noConta(nodo):
def __init__(self, id, label="CONTA", cor=PALE_TAB['verde'][0]):
nodo.__init__(self, id, label, "CCR")
self.cor = cor
class noGrupo(nodo):
def __init__(self, id, label="GRUPO", cor=PALE_TAB['azul'][0]):
nodo.__init__(self, id, label, "GR")
self.cor = cor
self.fonte = "grupos"
class noComunicacao(nodo):
def __init__(self, id, label="COMUNICACAO", cor=PALE_TAB['marrom'][1], dataOperacao=None):
nodo.__init__(self, id, label, "COM")
self.cor = cor
# self.dataOperacao=dataOperacao
class aresta:
def __init__(self, origem, destino, descricao="", cor="Silver", fonte="RIF"):
self.origem = origem
self.destino = destino
self.descricao = descricao
self.cor = cor
self.fonte = fonte
self.camada = 5 if self.fonte == "RIF" else 5
def todict(self):
return {
"origem": self.origem,
"destino": self.destino,
"cor": self.cor,
"camada": self.camada,
"tipoDescricao": {"0": self.descricao},
}
lg = log()
com = estrutura(
"Comunicacoes",
[
"Indexador",
"idComunicacao",
"NumeroOcorrenciaBC",
"Data_do_Recebimento",
"Data_da_operacao",
"DataFimFato",
"cpfCnpjComunicante",
"nomeComunicante",
"CidadeAgencia",
"UFAgencia",
"NomeAgencia",
"NumeroAgencia",
"informacoesAdicionais",
"CampoA",
"CampoB",
"CampoC",
"CampoD",
"CampoE",
"CodigoSegmento",
],
)
env = estrutura(
"Envolvidos",
[
"Indexador",
"cpfCnpjEnvolvido",
"nomeEnvolvido",
"tipoEnvolvido",
"agenciaEnvolvido",
"contaEnvolvido",
"DataAberturaConta",
"DataAtualizacaoConta",
"bitPepCitado",
"bitPessoaObrigadaCitado",
"intServidorCitado",
],
)
oco = estrutura("Ocorrencias", ["Indexador", "idOcorrencia", "Ocorrencia"])
# opcionais
gru = estrutura("Grupos", ["cpfCnpjEnvolvido", "nome_Envolvido", "Grupo", "Detalhe", "Analise"])
vin = estrutura(
"Vinculos",
[
"cpfCnpjEnvolvido",
"nome_Envolvido",
"cpfCnpjVinculado",
"nome_Vinculado",
"Descricao",
],
)
estruturas = [com, env, oco, gru, vin]
# help_estruturas(estruturas)
def removeAcentos(data):
if data is None:
return u""
# if isinstance(data,str):
# data = unicode(data,'latin-1','ignore')
return "".join(
x for x in unicodedata.normalize("NFKD", data) if x in string.printable
)
def gerar_planilha(arquivo, df, nome, indice=False):
def formatar_cabecalho(cor):
return arquivo.book.add_format(
{
"bold": True,
"text_wrap": True,
"valign": "top",
"fg_color": cor,
"border": 1,
}
)
# Palette URL: http://paletton.com/#uid=43K0I0kw0w0jyC+oRxVy4oIDfjr
PALETA = [
"#5778C0",
"#a4b3b6",
"#FF8D63",
"#FFE700",
"#FFA900",
"#000000",
] # azul, cinza, verm, amarelo, lara, preto
COR_PRINCIPAL = PALETA[0]
COR_NEUTRA_CLARA = PALETA[1]
COR_SECUNDARIA = PALETA[2]
COR_TERCIARIA = PALETA[4]
COR_NEUTRA_ESCURA = PALETA[5]
df.style.bar(color=COR_PRINCIPAL)
print("antes " + nome)
df.to_excel(arquivo, sheet_name=nome, index=indice)
print("depois " + nome)
# Write the column headers with the defined format.
# print(df.index.names)
if len(arquivo.sheets) > 6:
cor_basica = COR_SECUNDARIA
elif len(arquivo.sheets) < 3:
cor_basica = COR_PRINCIPAL
else:
cor_basica = COR_NEUTRA_CLARA
if not indice:
for col_num, value in enumerate(df.columns.values):
arquivo.sheets[nome].write(
0, col_num, value, formatar_cabecalho(cor_basica)
)
arquivo.sheets[nome].set_tab_color(cor_basica)
else:
for col_num, value in enumerate(df.index.names):
arquivo.sheets[nome].write(
0, col_num, value, formatar_cabecalho(cor_basica if value != 'Analise' else COR_SECUNDARIA)
)
for col_num, value in enumerate(df.columns.values):
arquivo.sheets[nome].write(
0,
col_num + len(df.index.names),
value,
formatar_cabecalho(COR_NEUTRA_CLARA),
)
arquivo.sheets[nome].set_tab_color(cor_basica)
def gerar_planilhaXLS(arquivo, df, nome, indice=False):
df.style.bar(color="#99ccff")
df.to_excel(arquivo, sheet_name=nome, index=indice)
def tipoi2F(umou2=1, linha=None, carJuncao="\r "):
print("linha= ", linha)
descricao = linha[1 if umou2 == 1 else 3]
# if descricao == '': #telefone ou endereco
# descricao = carJuncao.join(node[4:].split('__'))
# else:
# if self.GNX.node[node]['tipo'] !='TEL':
# descricao = Obj.parseCPFouCNPJ(node) + carJuncao + carJuncao.join(textwrap.wrap(descricao,30))
# dicTipo = {'TEL':u'Telefone', 'END':u'Local', 'PF':u'PF', 'PJ':u'PJ', 'PE':u'Edifรญcio', 'ES':u'Edifรญcio', 'CC':u'Conta','INF':u'Armรกrio' }
tipo = linha[7 if umou2 == 1 else 8]
# tipoi2 = dicTipo[tipo]
tipoi2 = u"Escritรณrio"
if tipo in ("TEL", "END", "CC"):
descricao = ""
else:
descricao = carJuncao.join(textwrap.wrap(descricao, 30))
sexo = 1
if tipo == "PF":
# if self.GNX.node[node]['sexo']==1:
if not sexo or sexo == 1:
tipoi2 = u"Profissional (masculino)"
elif sexo == 2:
tipoi2 = u"Profissional (feminino)"
elif tipo == "PJ":
# if node[8:12]!='0001':
# if sexo != 1: #1=matriz
if sexo % 2 == 0: # 1=matriz
tipoi2 = u"Apartamento" # filial de empresa
else:
tipoi2 = u"Escritรณrio"
elif tipo == "PE":
tipoi2 = u"Oficina"
corSituacao = linha[9 if umou2 == 1 else 10]
if linha[4 if umou2 == 1 else 5] == 0:
corSituacao = "Vermelho"
return (tipoi2, descricao, corSituacao)
def to_i2(df, arquivo=None):
dicTiposIngles = {
u"Profissional (masculino)": u"Person",
u"Profissional (feminino)": u"Woman",
u"Escritรณrio": u"Office",
u"Apartamento": u"Workshop",
u"Governo": u"House",
u"Casa": u"House",
u"Loja": u"Office",
u"Oficina": u"Office",
u"Telefone": u"Phone",
u"Local": u"Place",
u"Conta": u"Account",
u"Armรกrio": u"Cabinet",
u"Edifรญcio": u"Office",
}
# chart = Pyanx_macros()
noi2origem = {}
noi2destino = {}
for idc, campos in df.iterrows():
# print('campos= ',campos)
tipo, descricao, corSituacao = tipoi2F(linha=campos, umou2=1, carJuncao=" ")
noi2origem[idc] = chart.add_node(
entity_type=dicTiposIngles.get(tipo, ""),
label=(campos["cpfcnpj1"]) + u"-" + (descricao),
)
tipo, descricao, corSituacao = tipoi2F(linha=campos, umou2=2, carJuncao=" ")
noi2destino[idc] = chart.add_node(
entity_type=dicTiposIngles.get(tipo, ""),
label=(campos["cpfcnpj1"]) + u"-" + (descricao),
)
nomeLigacao = campos["descriรงรฃo"]
chart.add_edge(noi2origem[idc], noi2destino[idc], removeAcentos(nomeLigacao))
# idc += 1
fstream = chart.createStream(
layout="spring_layout", iterations=0
) # nรฃo calcula posiรงรฃo
retorno = fstream.getvalue()
fstream.close()
if arquivo is not None:
f = open(arquivo, "w")
f.write(retorno)
f.close()
return retorno
def soDigitos(texto):
return re.sub("[^0-9]", "", texto)
def estimarFluxoDoDinheiro(tInformacoesAdicionais):
# normalmente aparece algo como R$ 20,8 Mil enviada para Jardim Indรบstria e Comรฉrcio - CNPJ 606769xxx
# inicialmente quebramos o texto por R$ e verifica quais sรฃo seguidos por CPF ou CNPJ
# pega o texto da coluna InformacoesAdicionais do arquivo Comunicacoes.csv e tenta estimar o valor para cada cpf/cnpj
# normalmente aparece algo como R$ 20,8 Mil enviada para Indรบstria e Comรฉrcio - CNPJ 6067xxxxxx
# inicialmente quebramos o texto por R$ e verifica quais sรฃo seguidos por CPF ou CNPJ
# retorna dicionรกrio
# como {'26106949xx': 'R$420 MIL RECEBIDOS, R$131 MIL POR', '68360088xxx': 'R$22 MIL, RECEBIDAS'}
# lista = re.sub(' +', ' ',tInformacoesAdicionais).upper().split('R$')
t = re.sub(" +", " ", tInformacoesAdicionais).upper()
lista = t.split("R$")
listaComTermoCPFCNPJ = []
for item in lista:
if "CPF" in item or "CNPJ" in item:
listaComTermoCPFCNPJ.append(item.strip())
listaValores = []
valoresDict = {}
for item in listaComTermoCPFCNPJ:
valorPara = ""
cpn = ""
le = item.split(" ")
valor = "R$" + le[0] # + ' ' + le[1] # + ' ' + le[2]
if le[1].upper().rstrip(",").rstrip("S").rstrip(",") in (
"MIL",
"MI",
"RECEBIDO",
"RECEBIDA",
"ENVIADA",
"RETIRADO",
"DEPOSITADO",
"CHEQUE",
):
valor += " " + le[1]
if le[2].upper().rstrip(",").rstrip("S") in (
"MIL",
"MI",
"RECEBIDO",
"RECEBIDA",
"ENVIADA",
"RETIRADO",
"DEPOSITADO",
"CHEQUE",
):
valor += " " + le[2]
if "CPF" in item:
aux1 = item.split("CPF ")
try:
aux2 = aux1[1].split(" ")
cpn = soDigitos(aux2[0])
except:
pass
elif "CNPJ" in item:
aux1 = item.split("CNPJ ")
try:
aux2 = aux1[1].split(" ")
cpn = soDigitos(aux2[0])
except:
pass
if cpn:
listaValores.append(valorPara)
if cpn in valoresDict:
v = valoresDict[cpn]
v.add(valor)
valoresDict[cpn] = v
else:
valoresDict[cpn] = set([valor])
d = {}
for k, v in valoresDict.items():
d[k] = ", ".join(v)
return d
# .def estimaFluxoDoDinheiro(t):
def consolidar_pd(pasta):
"""Processa as planilhas comunicacoes, envolvidos, ocorrencias e grupo em planilhas com agrupamento """
arq = com.nomearq() # Comunicacoes
nome_rif = com.nome_rif
try:
df_com = pd.read_excel(
arq, options={"strings_to_numbers": False}, converters={"Indexador": str}
)
df_com["Indexador"] = pd.to_numeric(df_com["Indexador"], errors="coerce")
df_com["Data_da_operacao"] = pd.to_datetime(df_com["Data_da_operacao"])
if not com.estr_compativel(df_com.columns):
print(com.estr_upper())
mostra_erro("O arquivo " + arq + " contรฉm colunas incompatรญveis: ")
raise ("Estrutura incompatรญvel")
lg.gravalog("Arquivo " + arq + " lido.")
except Exception as exc:
print("Erro ao ler o arquivo " + arq + "\n" + str(type(exc)))
arq = env.nomearq() # Envolvidos
try:
df_env = pd.read_excel(
arq, options={"strings_to_numbers": False}, converters={"Indexador": str}
)
df_env["Indexador"] = pd.to_numeric(df_env["Indexador"], errors="coerce")
df_env = df_env[ | pd.notnull(df_env["Indexador"]) | pandas.notnull |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
( | TS('2015-01-02') | pandas.Timestamp |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#import seaborn as sns
import glob
from parse import parse
#defaultdict to use nested dictionaries
from collections import defaultdict
#quantiles calculation
from scipy.stats.mstats import mquantiles
#datetime conversion
from dateutil import parser
#statistical tools
from statsmodels import robust
import statsmodels.api as sm
#dates
import matplotlib.dates as mdates
#patches for legend
import matplotlib.patches as mpatches
from matplotlib.patches import Patch
#for legend to avoid repeating duplicates labels
from collections import OrderedDict
import seaborn as sns
#calculate area under curve for ROC curve:
from sklearn.metrics import auc
#find local peaks of a 2d function
from scipy.signal import find_peaks
#decide color series
import itertools
def dictionary(fileformat='std', pattern="/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_{simul_time}/cosmoe_{something}_{Name}/{otherstuff}",
folders_pattern = '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*'):
"""
Open every simulations run for different starting simulation times and create a dictionary of dataframes nested in
this way: based on the simulation time, choosing the realization (e.g. 'rm00_pin01') you have a dataframe of different
paramters and a number of rows given by the hourly time points
"""
#create a nested dictionary with two 'levels' to contain a list of dataframes for every simulation time
#and every ensemble member
nested_dict = lambda: defaultdict(nested_dict)
nested_df_collection = nested_dict()
#pattern to rename every dataframe (w different 'filepath') of the collection by the name of the simulation
#pattern = "/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_{simul_time}/cosmoe_{something}_{Name}/{otherstuff}"
#sim_dates: string array to store the renamed 'filetime' variables with the initialization time of the simulation
sim_dates = ["" for filetime in sorted(glob.iglob(folders_pattern))]
i = 0
#conditions on fileformat given in input to write the dataframes in the dictionary:
if fileformat == 'q':
skiprows = [1]
usecols = range(12)
columns = ['year', 'month', 'day', 'hour', 'RTOT', 'RTOT (l s-1 )', 'R0', 'R1', 'R2', 'RG1', 'RG2', 'RG3']
if fileformat == 'std':
skiprows = [0,1]
usecols = range(20)
columns = ['year', 'month', 'day', 'hour', 'NRTFL', 'P-uk', 'P-kor', 'P-SNO', 'EPOT', 'EREA', 'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1', '??2', '??3', '??4']
#for loop for every simulation made at different times
for filetime in sorted(glob.iglob(folders_pattern)):
#for loop to read every *.std/*.q file in every subdirectory present, sorted by name, and to create an array of
#dataframes
#(all data in files *.q except RTOT (l s-1) are dimensioned in mm/h)
#before that, if condition for distinguish different patterns considering the forecasts or the prec obs
if folders_pattern == '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*':
subfold = '/*/*.'
elif folders_pattern == '/home/ciccuz/hydro/PrecObs/cosmo1_*':
subfold = '/*.'
for filepath in sorted(glob.iglob(filetime + subfold + fileformat)):
nested_df_collection[filetime][filepath] = pd.DataFrame(pd.read_csv(filepath, skiprows=skiprows,
delim_whitespace=True, header=None,
names=columns,
usecols=usecols))
if fileformat == 'q':
nested_df_collection[filetime][filepath].columns = columns
#add complete date column to every dataframe
nested_df_collection[filetime][filepath]['date'] = pd.to_datetime(nested_df_collection[filetime]
[filepath][['year', 'month', 'day',
'hour']])
# If considering ensemble members: change name of every dataframe ('filepath') of the dictionary by its
# simulation name (depending on ensemble member and parameter set used)
if folders_pattern == '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*':
newname_filepath = parse(pattern + fileformat, filepath)
nested_df_collection[filetime][newname_filepath['Name']] = nested_df_collection[filetime].pop(filepath)
elif folders_pattern == '/home/ciccuz/hydro/PrecObs/cosmo1_*':
newname_filepath = parse(pattern + fileformat, filepath)
nested_df_collection[filetime][newname_filepath['otherstuff']] = nested_df_collection[filetime].pop(filepath)
#change name of every simulation time ('filetime') substituting it with the date of the simulation
#locate characters for year, month, day, hour in filetime strings
#if condition to account for cosmoe data or cosmo1 (for prec obs):
if folders_pattern == '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*' :
sim_year = filetime[50:54] #[70:74] second ones used for longer file patterns i.e. located in deeper subfolders
sim_month = filetime[54:56] #[74:76]
sim_day = filetime[56:58] #[76:78]
sim_hour = filetime[58:60] #[78:80]
#condition on hour: 00 or 12 UTC simulation start
if sim_hour[0] == '0':
sim_hour = '00'
else:
sim_hour = '12'
elif folders_pattern == "/home/ciccuz/hydro/PrecObs/cosmo1_*":
sim_year = filetime[34:38]
sim_month = filetime[38:40]
sim_day = filetime[40:42]
sim_hour = filetime[42:44]
if sim_hour[0] == '0':
sim_hour = '00'
sim_dates[i] = (sim_year+'-'+sim_month+'-'+sim_day+' '+sim_hour+':00:00')
nested_df_collection[sim_dates[i]] = nested_df_collection.pop(filetime)
i = i+1
return nested_df_collection
def prec_obs_series():
'''
Read all the precipitation data obtained by a combination of COSMO1 and pluviometer data to obtain a precipitation series
to be used as observation series.
WARNING: for the day 2-11-2018 the data at 12:00 is missing!
'''
# Create a dictionary of all precipitation datasets (obtained with COSMO1) present at different sim_start
prec_obs_df = dictionary(pattern="/home/ciccuz/hydro/PrecObs/cosmo1_{simul_time}/{otherstuff}",
folders_pattern = '/home/ciccuz/hydro/PrecObs/cosmo1_*')
# Create a dataframe that will contain the "observed" precipitation series obtained by the different simulations/pluviometer
# data interpolated of precipitation by taking the first 12 hours for every series in prec_obs_df and concatenate all of them
obs_prec = pd.DataFrame(columns = ['year', 'month', 'day', 'hour', 'P-uk', 'P-kor', 'date'])
#array of dates to consider every simulation start at 12 utc from 23-10 to 9-11 2018
sim_starts = ['2018-10-23 12:00:00', '2018-10-24 12:00:00', '2018-10-25 12:00:00', '2018-10-26 12:00:00',
'2018-10-27 12:00:00', '2018-10-28 12:00:00', '2018-10-29 12:00:00', '2018-10-30 12:00:00',
'2018-10-31 12:00:00', '2018-11-01 12:00:00', '2018-11-02 13:00:00', '2018-11-03 12:00:00',
'2018-11-04 12:00:00', '2018-11-05 12:00:00', '2018-11-06 12:00:00', '2018-11-07 12:00:00',
'2018-11-08 12:00:00', '2018-11-09 12:00:00']
i=0
for sim_start in sim_starts:
prec_set = prec_obs_df[sim_start]['Ver500.']
#Compute the subset to consider just the 24 h above the initialization time:
#to do so we need to do some if conditions because on the 2-11 the simulation starting at 12 is not present!
if sim_start == '2018-11-01 12:00:00' :
prec_subset = prec_set.loc[(prec_set.date >= sim_start) & (prec_set.index <= 443)].drop(['NRTFL', 'P-SNO', 'EPOT', 'EREA',
'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1'], axis=1)
prec_subset.index = range(i*24,i*24+24+1)
if sim_start == '2018-11-02 13:00:00':
prec_subset = prec_set.loc[(prec_set.date >= sim_start) & (prec_set.index <= 442)].drop(['NRTFL', 'P-SNO', 'EPOT', 'EREA',
'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1'], axis=1)
prec_subset.index = range(i*24+1,i*24+24)
else:
prec_subset = prec_set.loc[(prec_set.date >= sim_start) & (prec_set.index <= 442)].drop(['NRTFL', 'P-SNO', 'EPOT', 'EREA',
'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1'], axis=1)
prec_subset.index = range(i*24,i*24+24)
obs_prec = pd.concat([obs_prec, prec_subset])
i=i+1
return obs_prec
def ensemble_df(df, sim_start, Verzasca_area, variable_type):
"""
Create a dataframe containing all the different realizations, based on the dictionary created before,
on simulation start time and on the variable in which we are interested.
The resulting dataframe will have a number of column = # realizations (525 for all the combinations
of realizations) and a number of rows given by the total lead time expressed in hours (120 h for our case)
"""
#initialize the dataframe that contains all the realizations for a particular variable
ens_df = pd.DataFrame()
#index to account for the right dates without accounting them more than one time
j=0
#initialization of array to store the 120 hours dates
date_array = ["" for x in range(121)]
#condition on the variable chosen to convert discharge in m3/s:
if (variable_type == 'RTOT') or (variable_type == 'RGES'):
conv_factor = Verzasca_area/(1000.0*3600.0)
else:
conv_factor = 1.0
#for cycle on different members/paramsets
for member in df[sim_start].keys():
#for cycle on different dates
for date in df[sim_start][member]['date']:
#series of if conditions to account for the 120 hours just after the initialization point and not before
#case if we are on the same month -> must consider month and day
if (str(date)[5:7] == sim_start[5:7]):
#case if we are on the same day -> must consider hour
if (str(date)[8:10] == sim_start[8:10]):
if (str(date)[11:13] >= sim_start[11:13]):
#if condition to take just the first set of the next 120 hours without having many copies of them
if j >=0 and j <=120:
date_array[j] = date
j = j+1
#condition for precipitation to pick just the ensemble members and not every parameter set,
#since for prec do not change
if variable_type == 'P-kor':
if member[8:10] == '01':
ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start]*conv_factor
else:
ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start]*conv_factor
if (str(date)[8:10] > sim_start[8:10]):
#if condition to take just the first set of the next 120 hours without having many copies of them
if j >=0 and j <=120:
date_array[j] = date
j = j+1
#condition for precipitation to pick just the ensemble members and not every parameter set,
#since for prec do not change
if variable_type == 'P-kor':
if member[8:10] == '01':
ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start]*conv_factor
else:
ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start]*conv_factor
#case if we are in differen months -> can consider just the month and not the day
if (str(date)[5:7] > sim_start[5:7]):
#if condition to take just the first set of the next 120 hours without having many copies of them
if j >=0 and j <=120:
date_array[j] = date
j = j+1
#condition for precipitation to pick just the ensemble members and not every parameter set,
#since for prec do not change
if variable_type == 'P-kor':
if member[8:10] == '01':
ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start]*conv_factor
else:
ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start]*conv_factor
ens_df['date'] = date_array[1:]
ens_df.index = range(120)
return ens_df
def ens_param_groups(ens_df_runoff):
"""
From the ensemble dataframe, select groups of realizations based on the same ensemble members/parameters set:
create two dictionaries to contain the data based on choice of the representative members or of the parameters set
"""
#define a dictionary to contain the realizations based on groups of different rm
ens_members_dict = lambda: defaultdict(ens_members_dict)
ens_members_groups = ens_members_dict()
#considering all representative members from 00 to 20
for rm in range(21):
ens_members_groups[rm] = pd.DataFrame(index=range(120))
for realization in ens_df_runoff.columns[~ens_df_runoff.columns.isin(['date'])]:
#take just the realizations corresponding to the same rm
if str(realization)[2:4] == str('%02d' % rm):
ens_members_groups[rm][str(realization)] = ens_df_runoff[str(realization)]
ens_members_groups[rm]['date'] = ens_df_runoff['date']
#define a dictionary to contain the realizations based on groups of different parameter sets
param_sets_dict = lambda: defaultdict(param_sets_dict)
param_sets_groups = param_sets_dict()
#considering all representative members from 00 to 20
for pin in range(1,26):
param_sets_groups[pin] = pd.DataFrame(index=range(120))
for realization in ens_df_runoff.columns[~ens_df_runoff.columns.isin(['date'])]:
#take just the realizations corresponding to the same rm
if str(realization)[8:10] == str('%02d' % pin):
param_sets_groups[pin][str(realization)] = ens_df_runoff[str(realization)]
param_sets_groups[pin]['date'] = ens_df_runoff['date']
return ens_members_groups, param_sets_groups
def quantiles(ens_df):
"""
Calculate the quantiles for the ensemble dataframe considered (e.g. all realizations, or all param sets chosen a rm,...)
"""
#define a dataframe to contain the quantiles
quantiles = pd.DataFrame()
columns = ['0.0', '0.1', '0.2', '0.25', '0.3', '0.4', '0.5', '0.6', '0.7', '0.75', '0.8', '0.9', '0.95',
'0.975', '0.99', '1.0']
quantiles = ens_df.quantile([.0,.1,.2,.25,.3,.4,.5,.6,.7,.75,.8,.9,.95,.975,.99,1.0], axis=1)
#j=0
#calculate quantiles for every date considering every different simulation run
#for i in ens_df['date']:
# quantiles[j] = mquantiles(ens_df.loc[ens_df['date'] == i].drop('date', axis=1),
# prob=[0.0, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.9, 0.95,
# 0.975, 0.99, 1.0])
# j = j+1
#transpose the dataframe
quantiles = quantiles.T
quantiles.columns = columns
quantiles['date'] = ens_df['date']
return quantiles
def spaghetti_plot(ens_df_runoff, ens_df_prec, obs_subset, prec_obs_subset, sim_start, past=False, clustered=False, medians=False):
"""
Produce a spaghetti plot considering a set of the ensemble members: upper precipitation realizations, lower runoff
realizations, altogether with observations
Condition on variable "past": if it is False it's for the forecast with precipitation variability,
if it is True it's for looking at hydro param uncertainty in the past foreast where there is no prec variability
"""
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(13,8), dpi=100)
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=2, colspan=1)
if past == False:
if clustered == False:
plt.title('Spaghetti plot for runoff and precipitation realizations for initialization ' + sim_start)
else:
plt.title('Spaghetti plot for clustered (5 RM) runoff and precipitation realizations for initialization ' + sim_start)
else:
plt.title('Spaghetti plot for runoff realizations, 5 days before initialization ' + sim_start)
plt.ylabel('Precipitation [mm h$^{-1}$]')
ax2 = plt.subplot2grid((6,1), (2,0), rowspan=4, colspan=1, sharex=ax1)
plt.ylabel('Discharge [m$^3$ s$^{-1}$]')
if past == False:
for member in ens_df_prec.columns[~ens_df_prec.columns.isin(['date'])]:
prec_member = ax1.plot(ens_df_prec.date, ens_df_prec[member], color='#023FA5', linewidth=0.5)
l1 = ax1.plot(prec_obs_subset.date, prec_obs_subset['P-kor'], linewidth=2, label='Prec obs', color='red')
ax1.invert_yaxis()
ax1.grid(True)
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position("right")
ax1.spines["bottom"].set_visible(False)
if past == False:
#label text box
if clustered == False:
prec_label='All ens members'
else:
prec_label='Cluster 5 rm'
ax1.text(0.015, 0.135, prec_label, transform=ax1.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#023FA5', alpha=0.3))
for member in ens_df_runoff.columns[~ens_df_runoff.columns.isin(['date'])]:
runoff_member = ax2.plot(ens_df_runoff.date, ens_df_runoff[member], color='#32AAB5', linewidth=0.5)
l2 = ax2.plot(obs_subset.date, obs_subset.runoff, linewidth=2, label='Runoff obs', color='orange')
ax2.grid(True)
ax2.spines["top"].set_visible(False)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
#label text box
if past == False:
if clustered == False:
if medians == True:
runoff_label='Ens medians'
else:
runoff_label='\n'.join((r'All ens realizations', r'All pin realizations'))
else:
if medians == True:
runoff_label='Cluster 5 rm medians'
else:
runoff_label='\n'.join((r'Cluster 5 rm', r'All pin realizations'))
if past == True:
runoff_label = 'All pin realizations'
ax2.text(0.015, 0.965, runoff_label, transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#32AAB5', alpha=0.3))
#y axis limits
#ax2.set_ylim([0,500])
#x axis ticks and limits
days = mdates.DayLocator()
hours = mdates.HourLocator()
yearsFmt = mdates.DateFormatter('%Y-%m-%d') # %H:%M')
ax2.xaxis.set_major_locator(days)
ax2.xaxis.set_major_formatter(yearsFmt)
ax2.xaxis.set_minor_locator(hours)
# min and max on x axis
datemin = np.datetime64(ens_df_runoff.date[0], 'm') - np.timedelta64(60, 'm')
datemax = np.datetime64(ens_df_runoff.date[119], 'm') + np.timedelta64(25, 'm')
ax2.set_xlim(datemin, datemax)
if past == False:
fig.legend(handles=[prec_member[0], l1[0], runoff_member[0], l2[0]], ncol=2, framealpha=0.5,
loc=(0.5425,0.545), labels=['Prec member', 'Prec obs', 'Runoff member', 'Runoff obs']);
else:
fig.legend(handles=[l1[0], runoff_member[0], l2[0]], ncol=1, framealpha=0.5,
loc=(0.7425,0.545), labels=['Prec obs', 'Runoff member', 'Runoff obs']);
plt.rcParams.update({'font.size': 12})
return plt.show()
def hydrograph(quant_runoff, quant_prec, obs_subset, prec_obs_subset, sim_start, past=False, medians=False):
"""
Similar to spaghetti plot but with quantiles values, showing the median, the IQR and the total spread of both
precipitation and runoff forecast, altogether with observations
"""
#datetime conversion to use plt.fill_between otherwise it would not work with quantiles.date on x axis
date_conv = ['' for x in range(120)]
i=0
for date in quant_prec.date:
date_conv[i] = parser.parse(str(date))
i = i+1
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(13,8), dpi=100)
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=2, colspan=1)
if past == False:
plt.title('Discharge hydrograph and forecast precipitation for initialization ' + sim_start)
else:
plt.title('Discharge hydrograph, 5 days before initialization ' + sim_start)
plt.ylabel('Precipitation [mm h$^{-1}$]')
ax2 = plt.subplot2grid((6,1), (2,0), rowspan=4, colspan=1, sharex=ax1)
plt.ylabel('Discharge [m$^3$ s$^{-1}$]')
if past == False:
ax1.fill_between(date_conv, quant_prec ['0.75'], quant_prec ['0.25'], facecolor='#023FA5', alpha='0.3')
ax1.fill_between(date_conv, quant_prec ['1.0'], quant_prec ['0.75'], facecolor='#023FA5', alpha='0.5')
ax1.fill_between(date_conv, quant_prec ['0.25'], quant_prec ['0.0'], facecolor='#023FA5', alpha='0.5')
l1 = ax1.plot(date_conv, quant_prec ['0.5'], linewidth=2, label='Prec $q_{50\%}$', color='#023FA5', alpha=1)
#label text box
prec_label='All ens members'
ax1.text(0.015, 0.135, prec_label, transform=ax1.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#023FA5', alpha=0.3))
l2 = ax1.plot(prec_obs_subset.date, prec_obs_subset['P-kor'], linewidth=2, label='Prec obs', color='red')
ax1.invert_yaxis()
ax1.grid(True)
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position("right")
ax1.spines["bottom"].set_visible(False)
ax2.fill_between(date_conv, quant_runoff ['0.75'], quant_runoff ['0.25'], facecolor='#32AAB5', alpha='0.3')
ax2.fill_between(date_conv, quant_runoff ['1.0'], quant_runoff ['0.75'], facecolor='#32AAB5', alpha='0.5')
ax2.fill_between(date_conv, quant_runoff ['0.25'], quant_runoff ['0.0'], facecolor='#32AAB5', alpha='0.5')
l3 = ax2.plot(date_conv, quant_runoff ['0.5'], linewidth=2, label='Runoff $q_{50\%}$', color='#32AAB5', alpha=1)
l4 = ax2.plot(obs_subset.date, obs_subset.runoff, linewidth=2, label='Runoff obs', color='orange')
ax2.grid(True)
ax2.spines["top"].set_visible(False)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
if past == False:
if medians == False:
runoff_label='\n'.join((r'All ens realizations', r'All pin realizations'))
else:
runoff_label='Ens medians'
else:
runoff_label = 'All pin realizations'
#label text box
ax2.text(0.015, 0.965, runoff_label, transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#32AAB5', alpha=0.3))
#y axis limits
#ax2.set_ylim([0,500])
#x axis ticks and limits
days = mdates.DayLocator()
hours = mdates.HourLocator()
yearsFmt = mdates.DateFormatter('%Y-%m-%d') # %H:%M')
ax2.xaxis.set_major_locator(days)
ax2.xaxis.set_major_formatter(yearsFmt)
ax2.xaxis.set_minor_locator(hours)
# min and max on x axis
datemin = np.datetime64(quant_runoff.date[0], 'm') - np.timedelta64(60, 'm')
datemax = np.datetime64(quant_runoff.date[119], 'm') + np.timedelta64(25, 'm')
ax2.set_xlim(datemin, datemax)
runoff_IQR = mpatches.Patch(color='#32AAB5',alpha=0.3, label='Runoff IQR')
runoff_spread = mpatches.Patch(color='#32AAB5',alpha=0.5, label='Runoff spread')
if past == False:
prec_IQR = mpatches.Patch(color='#023FA5',alpha=0.3, label='Prec IQR')
prec_spread = mpatches.Patch(color='#023FA5',alpha=0.5, label='Prec spread')
legend = fig.legend(title='Precipitation Runoff', handles=[l2[0], l1[0], prec_IQR, prec_spread,
l4[0], l3[0], runoff_IQR, runoff_spread],
ncol=2, framealpha=0.5, loc=(0.645,0.526),
labels=[' Observation',
' Median $q_{50\%}$',
' IQR',
' Total spread',
'', '', '', '']);
if past == True:
fig.legend(handles=[l2[0], l3[0], runoff_IQR, runoff_spread, l4[0]], ncol=1, framealpha=0.5,
loc=(0.7,0.5), labels=['Prec obs', 'Runoff $q_{50\%}$', 'Runoff IQR', 'Runoff spread', 'Runoff obs']);
plt.rcParams.update({'font.size': 12})
return plt.show()
def comparison_meteo_hydrograph(quant_rm_medians, quant_runoff, quant_prec, obs_subset, prec_obs_subset, sim_start, thinning=False):
"""
Like hydrograph function but showing also the portion of spread not covered by the median meteo forecasts
"""
#datetime conversion to use plt.fill_between otherwise it would not work with quantiles.date on x axis
date_conv = ['' for x in range(120)]
i=0
for date in quant_rm_medians.date:
date_conv[i] = parser.parse(str(date))
i = i+1
"""
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(13,8), dpi=100)
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=2, colspan=1)
if thinning == False:
uncover_facecol = '#FDE333'
alpha = 0.35
plt.title('Meteorological ens medians, \n Discharge hydrograph and forecast precipitation for initialization ' + sim_start)
else:
uncover_facecol = '#D1FBD4'
alpha=0.7
plt.title('Thinned forecast: removal of extreme meteorological members, \n Discharge hydrograph and forecast precipitation for initialization ' + sim_start)
plt.ylabel('Precipitation [mm h$^{-1}$]')
ax2 = plt.subplot2grid((6,1), (2,0), rowspan=4, colspan=1, sharex=ax1)
plt.ylabel('Discharge [m$^3$ s$^{-1}$]')
ax1.fill_between(date_conv, quant_prec ['0.75'], quant_prec ['0.25'], facecolor='#023FA5', alpha='0.3')
ax1.fill_between(date_conv, quant_prec ['1.0'], quant_prec ['0.75'], facecolor='#023FA5', alpha='0.5')
ax1.fill_between(date_conv, quant_prec ['0.25'], quant_prec ['0.0'], facecolor='#023FA5', alpha='0.5')
l1 = ax1.plot(date_conv, quant_prec ['0.5'], linewidth=2, label='Prec $q_{50\%}$', color='#023FA5', alpha=1)
#label text box
prec_label='All ens members'
ax1.text(0.015, 0.135, prec_label, transform=ax1.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#023FA5', alpha=0.3))
l2 = ax1.plot(prec_obs_subset.date, prec_obs_subset['P-kor'], linewidth=2, label='Prec obs', color='red')
ax1.invert_yaxis()
ax1.grid(True)
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position("right")
ax1.spines["bottom"].set_visible(False)
ax2.fill_between(date_conv, quant_rm_medians ['0.75'], quant_rm_medians ['0.25'], facecolor='#32AAB5', alpha='0.3')
ax2.fill_between(date_conv, quant_rm_medians ['1.0'], quant_rm_medians ['0.75'], facecolor='#32AAB5', alpha='0.5')
ax2.fill_between(date_conv, quant_rm_medians ['0.25'], quant_rm_medians ['0.0'], facecolor='#32AAB5', alpha='0.5')
l3 = ax2.plot(date_conv, quant_rm_medians ['0.5'], linewidth=2, label='Runoff $q_{50\%}$', color='#32AAB5', alpha=1)
l4 = ax2.plot(obs_subset.date, obs_subset.runoff, linewidth=2, label='Runoff obs', color='orange')
#fill between cluster quantile 1.0 and non-cluster quantile 1.0
ax2.fill_between(date_conv, quant_rm_medians ['1.0'], quant_runoff ['1.0'], facecolor=uncover_facecol, alpha=alpha, hatch='///', edgecolor='#32AAB5',linewidth=0.0)
ax2.fill_between(date_conv, quant_rm_medians ['0.0'], quant_runoff ['0.0'], facecolor=uncover_facecol, alpha=alpha, hatch='///', edgecolor='#32AAB5',linewidth=0.0) ##9E289E
ax2.grid(True)
ax2.spines["top"].set_visible(False)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
#label text box
if thinning == False:
runoff_label='Ens medians'
else:
runoff_label='Thinned ens members'
ax2.text(0.015, 0.965, runoff_label, transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#32AAB5', alpha=0.3))
#y axis limits
#ax2.set_ylim([0,500])
#x axis ticks and limits
days = mdates.DayLocator()
hours = mdates.HourLocator()
yearsFmt = mdates.DateFormatter('%Y-%m-%d') # %H:%M')
ax2.xaxis.set_major_locator(days)
ax2.xaxis.set_major_formatter(yearsFmt)
ax2.xaxis.set_minor_locator(hours)
# min and max on x axis
datemin = np.datetime64(quant_rm_medians.date[0], 'm') - np.timedelta64(60, 'm')
datemax = np.datetime64(quant_rm_medians.date[119], 'm') + np.timedelta64(25, 'm')
ax2.set_xlim(datemin, datemax)
runoff_IQR = mpatches.Patch(color='#32AAB5',alpha=0.3, label='Runoff IQR')
runoff_spread = mpatches.Patch(color='#32AAB5',alpha=0.5, label='Runoff spread')
uncovered_runoff_spread = mpatches.Patch(facecolor=uncover_facecol,alpha=alpha, hatch='///', edgecolor='#32AAB5', label='Runoff spread uncovered by meteo medians')
prec_IQR = mpatches.Patch(color='#023FA5',alpha=0.3, label='Prec IQR')
prec_spread = mpatches.Patch(color='#023FA5',alpha=0.5, label='Prec spread')
empty = ax1.plot(prec_obs_subset.date, prec_obs_subset['P-kor'], linewidth=0)
if thinning == False:
covering_label = ' Spread not covered \n by meteo medians'
else:
covering_label = ' Spread not covered \n by thinned fcst'
legend = fig.legend(title='$\\bf Precipitation $ $\\bf Runoff$', handles=[l2[0], l1[0], prec_IQR, prec_spread, empty[0],
l4[0], l3[0], runoff_IQR, runoff_spread, uncovered_runoff_spread],
ncol=2, framealpha=0.5, loc=(0.644,0.526),
labels=[' Observation',
' Median $q_{50\%}$',
' IQR',
' Total spread',
f'{covering_label}', '',
'', '', '', '', '\n', '']);
plt.rcParams.update({'font.size': 12})
"""
#look at the percentage of spread covered by ens medians forecasts: calculate the spread ranges, do their ratio, divide by 120 (the amount of
#leadtime hours) and sum over all the hours. If present, remove NaNs values and divide not by 120 but by the length of non-NaNs values
#set a threshold on the obs to avoid low flow conditions: obs > 17.4 m3s-1
rm_medians_spread_runoff = quant_rm_medians['1.0'] - quant_rm_medians['0.0']
rm_medians_spread_runoff = rm_medians_spread_runoff.loc[obs_subset.runoff.reset_index(drop=True) > 17.4].reset_index(drop=True)
total_spread_runoff = quant_runoff ['1.0'] - quant_runoff['0.0']
total_spread_runoff = total_spread_runoff.loc[obs_subset.runoff.reset_index(drop=True) > 17.4].reset_index(drop=True)
raw_spread_ratio_runoff = rm_medians_spread_runoff/total_spread_runoff
nonNaNs_runoff = np.where(np.isnan(raw_spread_ratio_runoff)== False)[0]
spread_ratio_runoff = raw_spread_ratio_runoff[nonNaNs_runoff]/len(nonNaNs_runoff)
perc_spread_runoff = sum(spread_ratio_runoff)
uncov_runoff_spread = 1.0 - perc_spread_runoff
"""
fig.text(0.917,0.865,f'{uncov_runoff_spread*100:.1f}%', transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0), zorder=10)
"""
plt.ioff()
return uncov_runoff_spread#, plt.show()
def hydrograph_rms(rm_high, rm_medium, rm_low, ens_df_prec, quant_rm_groups_runoff, quant_runoff, obs_subset,
prec_obs_subset, sim_start):
date_conv = ['' for x in range(120)]
i=0
for date in quant_runoff.date:
date_conv[i] = parser.parse(str(date))
i = i+1
date_conv_prec = ['' for x in range(len(prec_obs_subset))]
i=0
for date in prec_obs_subset.date:
date_conv_prec[i] = parser.parse(str(date))
i = i+1
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(13,8), dpi=100)
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=2, colspan=1)
plt.title('Discharge hydrograph and forecast precipitation for initialization ' + sim_start)
plt.ylabel('Precipitation [mm h$^{-1}$]')
ax2 = plt.subplot2grid((6,1), (2,0), rowspan=4, colspan=1, sharex=ax1)
plt.ylabel('Discharge [m3 s$^{-1}$]')
ax1.plot(ens_df_prec.date, ens_df_prec[f'rm{str(rm_high).zfill(2)}_pin01'], color='#C94B7C', linewidth=1.5, linestyle='--')
ax1.plot(ens_df_prec.date, ens_df_prec[f'rm{str(rm_medium).zfill(2)}_pin01'], color='#848B00', linewidth=1.5, linestyle='--')
ax1.plot(ens_df_prec.date, ens_df_prec[f'rm{str(rm_low).zfill(2)}_pin01'], color='#32AAB5', linewidth=1.5, linestyle='--')
ax1.fill_between(date_conv_prec, prec_obs_subset['P-kor'], 0, facecolor='#023FA5', alpha='0.3')
ax1.invert_yaxis()
ax1.grid(True)
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position("right")
ax1.spines["bottom"].set_visible(False)
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_high]['0.75'], quant_rm_groups_runoff[rm_high]['0.25'], facecolor='#C94B7C', alpha='0.3')
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_high]['1.0'], quant_rm_groups_runoff[rm_high]['0.75'], facecolor='#C94B7C', alpha='0.5')
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_high]['0.25'], quant_rm_groups_runoff[rm_high]['0.0'], facecolor='#C94B7C', alpha='0.5')
l3_1 = ax2.plot(date_conv, quant_rm_groups_runoff[rm_high]['0.5'], linewidth=2, label='Runoff $q_{50\%}$', color='#C94B7C', alpha=1)
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_medium]['0.75'], quant_rm_groups_runoff[rm_medium]['0.25'], facecolor='#848B00', alpha='0.3')
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_medium]['1.0'], quant_rm_groups_runoff[rm_medium]['0.75'], facecolor='#848B00', alpha='0.5')
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_medium]['0.25'], quant_rm_groups_runoff[rm_medium]['0.0'], facecolor='#848B00', alpha='0.5')
l3_2 = ax2.plot(date_conv, quant_rm_groups_runoff[rm_medium]['0.5'], linewidth=2, label='Runoff $q_{50\%}$', color='#848B00', alpha=1)
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_low]['0.75'], quant_rm_groups_runoff[rm_low]['0.25'], facecolor='#32AAB5', alpha='0.3')
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_low]['1.0'], quant_rm_groups_runoff[rm_low]['0.75'], facecolor='#32AAB5', alpha='0.5')
ax2.fill_between(date_conv, quant_rm_groups_runoff[rm_low]['0.25'], quant_rm_groups_runoff[rm_low]['0.0'], facecolor='#32AAB5', alpha='0.5')
l3_3 = ax2.plot(date_conv, quant_rm_groups_runoff[rm_low]['0.5'], linewidth=2, label='Runoff $q_{50\%}$', color='#32AAB5', alpha=1)
l4 = ax2.plot(obs_subset.date, obs_subset.runoff, linewidth=1.5, label='Runoff obs', color='k')
ax2.grid(True)
ax2.spines["top"].set_visible(False)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
#label text box
ax2.text(0.015, 0.965, f'rm={rm_high}', transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#C94B7C', alpha=0.3))
ax2.text(0.015, 0.875, f'rm={rm_medium}', transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#848B00', alpha=0.3))
ax2.text(0.015, 0.785, f'rm={rm_low}', transform=ax2.transAxes, fontsize=13,
verticalalignment='top', bbox=dict(boxstyle='round', facecolor='#32AAB5', alpha=0.3))
#y axis limits
#ax2.set_ylim([0,500])
#x axis ticks and limits
days = mdates.DayLocator()
hours = mdates.HourLocator()
yearsFmt = mdates.DateFormatter('%Y-%m-%d') # %H:%M')
ax2.xaxis.set_major_locator(days)
ax2.xaxis.set_major_formatter(yearsFmt)
ax2.xaxis.set_minor_locator(hours)
# min and max on x axis
datemin = np.datetime64(quant_runoff.date[0], 'm') - np.timedelta64(60, 'm')
datemax = np.datetime64(quant_runoff.date[119], 'm') + np.timedelta64(25, 'm')
ax2.set_xlim(datemin, datemax)
prec_obs = mpatches.Patch(color='#023FA5',alpha=0.3, label='Prec obs')
runoff_IQR = mpatches.Patch(color='#32AAB5',alpha=0.3, label='Runoff IQR')
runoff_spread = mpatches.Patch(color='#32AAB5',alpha=0.5, label='Runoff spread')
fig.legend(handles=[prec_obs, l3_3[0], runoff_IQR, runoff_spread, l4[0]], ncol=1, framealpha=0.5,
loc=(0.75,0.465), labels=['Prec obs','Runoff $q_{50\%}$', 'Runoff IQR', 'Runoff spread', 'Runoff obs']);
plt.rcParams.update({'font.size': 12})
return plt.show()
def past_hydro_unc_ensemble_df(df, sim_start, Verzasca_area, variable_type):
"""
Similarly to ensemble_df() it creates a dataframe containing all the different (hydrological) realizations
but for a period of time comprised in the 5 days before the simulation start, to look at the hydrological
uncertainty a posteriori (i.e. when meteorological uncertainty is not present because meteorological observations
are used in the past while the hydrological parameters can continue to change)
"""
#initialize the dataframe that contains all the realizations for a particular variable
past_ens_df = pd.DataFrame()
#index to account for the right dates without accounting them more than one time
j=0
#initialization of array to store the 120 hours dates
date_array = ["" for x in range(121)]
#condition on the variable chosen to convert discharge in m3/s:
if (variable_type == 'RTOT') or (variable_type == 'RGES'):
conv_factor = Verzasca_area/(1000.0*3600.0)
else:
conv_factor = 1.0
#5 days before the simulation start:
index_sim_start = int(df[sim_start]['rm00_pin01']['date'].loc[df[sim_start]['rm00_pin01']['date'] == sim_start].index.values)
#sim_start_minus5days = str(df[sim_start]['rm00_pin01']['date'].loc[df[sim_start]['rm00_pin01']['date'].index == index_sim_start])-120])[6:25]
sim_start_minus5days = '2018-11-03 00:00:00'
#for cycle on different members/paramsets (pick just the first 25 because all the other are the same, meteo doesnt change)
for member in list(df[sim_start].keys())[0:25]:
#for cycle on different dates
for date in df[sim_start][member]['date']:
#series of if conditions to account for the 120 hours just BEFORE the initialization point and not AFTER
#case if we are on the same month -> must consider month and day
if (str(date)[5:7] == sim_start_minus5days[5:7]):
#case if we are on the same day -> must consider hour
if (str(date)[8:10] == sim_start_minus5days[8:10]):
if (str(date)[11:13] >= sim_start_minus5days[11:13]):
#if condition to take just the first set of the next 120 hours without having many copies of them
if j >=0 and j <=120:
date_array[j] = date
j = j+1
#condition for precipitation to pick just the ensemble members and not every parameter set,
#since for prec do not change
if variable_type == 'P-kor':
if member[8:10] == '01':
#take the 120 hours in between the sim_start_minus5days and sim_start
past_ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start_minus5days].loc[df[sim_start][member]['date'] <= sim_start]*conv_factor
else:
past_ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start_minus5days].loc[df[sim_start][member]['date'] <= sim_start]*conv_factor
if (str(date)[8:10] > sim_start_minus5days[8:10]):
#if condition to take just the first set of the next 120 hours without having many copies of them
if j >=0 and j <=120:
date_array[j] = date
j = j+1
#condition for precipitation to pick just the ensemble members and not every parameter set,
#since for prec do not change
if variable_type == 'P-kor':
if member[8:10] == '01':
past_ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start_minus5days].loc[df[sim_start][member]['date'] <= sim_start]*conv_factor
else:
past_ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start_minus5days].loc[df[sim_start][member]['date'] <= sim_start]*conv_factor
#case if we are in differen months -> can consider just the month and not the day
if (str(date)[5:7] > sim_start[5:7]):
#if condition to take just the first set of the next 120 hours without having many copies of them
if j >=0 and j <=120:
date_array[j] = date
j = j+1
#condition for precipitation to pick just the ensemble members and not every parameter set,
#since for prec do not change
if variable_type == 'P-kor':
if member[8:10] == '01':
past_ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start_minus5days].loc[df[sim_start][member]['date'] <= sim_start]*conv_factor
else:
past_ens_df[member] = df[sim_start][member][variable_type].loc[df[sim_start][member]['date'] > sim_start_minus5days].loc[df[sim_start][member]['date'] <= sim_start]*conv_factor
past_ens_df['date'] = date_array[1:]
past_ens_df.index = range(120)
return past_ens_df
def hydro_unc_boxplot(quant_rm_groups_runoff, sim_start, normalized=False):
"""
For every timestep (hour) calculate the spread range q100-q0 and the IQR range q75-q25 for every realization (meteo median)
and based on where the (median?) is, place it in the right runoff range, then calculate the boxplot for every range of
discharge and for the total spread and the IQR
"""
#decide in which way to split the discharge value along the y-axis:
runoff_ranges_names = ['0-25', '25-50', '50-75', '75-100', '>100']
runoff_ranges_values = [25.0, 50.0, 75.0, 100.0]
#runoff_ranges_names = ['0-50', '50-100', '100-150', '150-200', '>200']
#runoff_ranges_values = [50.0, 100.0, 150.0, 200.0]
# Dictionary of dataframes for every ens member look at hydro unc around it
hydro_unc_dict = lambda: defaultdict(hydro_unc_dict)
hydro_unc = hydro_unc_dict()
for rm in range(21):
hydro_unc[rm] = pd.DataFrame(index=range(120*2),columns=runoff_ranges_names)
hydro_unc[rm]['unc_interval'] = np.nan
for rm in range(21):
j=0
for i in range(120):
#calculate the spread range and IQR range for every time step,
#choose if normalized with the median value at that point or not:
if normalized == True :
spread_range = (quant_rm_groups_runoff[rm]['1.0'][i] - quant_rm_groups_runoff[rm]['0.0'][i]) / quant_rm_groups_runoff[rm]['0.5'][i]
IQR_range = (quant_rm_groups_runoff[rm]['0.75'][i] - quant_rm_groups_runoff[rm]['0.25'][i]) / quant_rm_groups_runoff[rm]['0.5'][i]
else :
spread_range = (quant_rm_groups_runoff[rm]['1.0'][i] - quant_rm_groups_runoff[rm]['0.0'][i])
IQR_range = (quant_rm_groups_runoff[rm]['0.75'][i] - quant_rm_groups_runoff[rm]['0.25'][i])
#series of if conditions to address in which range we are, look at the median
if (quant_rm_groups_runoff[rm]['0.5'][i] < runoff_ranges_values[0]):
hydro_unc[rm][runoff_ranges_names[0]][j+1] = spread_range
hydro_unc[rm][runoff_ranges_names[0]][j] = IQR_range
if ((quant_rm_groups_runoff[rm]['0.5'][i] >= runoff_ranges_values[0]) & (quant_rm_groups_runoff[rm]['0.5'][i] < runoff_ranges_values[1]) ) :
hydro_unc[rm][runoff_ranges_names[1]][j+1] = spread_range
hydro_unc[rm][runoff_ranges_names[1]][j] = IQR_range
if ((quant_rm_groups_runoff[rm]['0.5'][i] >= runoff_ranges_values[1]) & (quant_rm_groups_runoff[rm]['0.5'][i] < runoff_ranges_values[2]) ) :
hydro_unc[rm][runoff_ranges_names[2]][j+1] = spread_range
hydro_unc[rm][runoff_ranges_names[2]][j] = IQR_range
if ((quant_rm_groups_runoff[rm]['0.5'][i] >= runoff_ranges_values[2]) & (quant_rm_groups_runoff[rm]['0.5'][i] <= runoff_ranges_values[3]) ) :
hydro_unc[rm][runoff_ranges_names[3]][j+1] = spread_range
hydro_unc[rm][runoff_ranges_names[3]][j] = IQR_range
if (quant_rm_groups_runoff[rm]['0.5'][i] > runoff_ranges_values[3]) :
hydro_unc[rm][runoff_ranges_names[4]][j+1] = spread_range
hydro_unc[rm][runoff_ranges_names[4]][j] = IQR_range
hydro_unc[rm]['unc_interval'][j+1] = 'Total spread: q100 - q0'
hydro_unc[rm]['unc_interval'][j] = 'IQR: q75 - q25'
j=j+2
# Merge all dataframes together
hydro_unc_tot = pd.concat((hydro_unc[rm] for rm in range(21)), ignore_index=True)
"""
sns.set(style="ticks", palette="pastel")
fig, ax = plt.subplots(1, 1, figsize=(10,7), dpi=100)
plt.title('Hydrological uncertainty obtained for every rm median for initialization ' + sim_start)
melted_hydro_unc = pd.melt(hydro_unc_tot, id_vars=['unc_interval'])
melted_hydro_unc.value = melted_hydro_unc.value.astype(float)
sns.boxplot(data=melted_hydro_unc,x="value", y='variable', hue='unc_interval',
palette=["#E4CBF9", "#9AE1E1"])
ax.invert_yaxis()
sns.despine(offset=10, trim=True)
plt.ylabel('Discharge interval [m3 s$^{-1}$]')
if normalized == True :
xlabel = 'Normalized spread interval range'
else :
xlabel = 'Spread interval range [m3 s$^{-1}$]'
plt.xlabel(xlabel)
plt.legend(title='Hydro param spread dispersion', loc='lower right')
plt.grid()
"""
#count how many times the spread interval is in every different discharge range:
n_samples = pd.DataFrame(index=range(1))
for column in hydro_unc_tot.columns[~hydro_unc_tot.columns.isin(['unc_interval'])]:
n_samples[column] = hydro_unc_tot[column].count()/len(hydro_unc_tot[column])
n_samples_sns = pd.DataFrame(columns=['spread_int', 'freq'])
n_samples_sns['spread_int'] = n_samples.columns
n_samples_sns['freq'] = n_samples.loc[n_samples.index == 0].iloc[0].values
melted_hydro_unc = pd.melt(hydro_unc_tot, id_vars=['unc_interval'])
melted_hydro_unc.value = melted_hydro_unc.value.astype(float)
if normalized == True :
xlabel = 'Normalized spread interval range'
else :
xlabel = 'Spread interval range [m3 s$^{-1}$]'
#PLOT
sns.set(style="ticks", palette="pastel")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13,8), dpi=100)
fig.suptitle('Hydrological uncertainty spread distribution for initialization ' + sim_start, y=0.95)
ax1 = plt.subplot2grid((1,6), (0,0), rowspan=1, colspan=4)
sns.boxplot(data=melted_hydro_unc,x="value", y='variable', hue='unc_interval',
palette=["#E4CBF9", "#9AE1E1"])
ax1.invert_yaxis()
sns.despine(offset=10, trim=True)
ax1.grid(True)
ax1.set_ylabel('Discharge interval [m$^3$ s$^{-1}$]')
ax1.set_xlabel(xlabel)
ax1.get_legend().remove()
ax2 = plt.subplot2grid((1,6), (0,4), rowspan=1, colspan=2, sharey=ax1)
ax2.set_xlabel('Frequency')
sns.barplot(x='freq', y='spread_int', data=n_samples_sns, color='#7DD3FF', linewidth=1.5, edgecolor=".2")
ax2.invert_yaxis()
sns.despine(offset=10, trim=False)
ax2.yaxis.tick_right()
ax2.invert_xaxis()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel('')
ax2.grid(True)
ax2.set_xlabel('Spread interval frequency')
ax2.set_xlim(round(max(n_samples_sns['freq']),1), 0)
ax2.set_xticks(np.arange(0, round(max(n_samples_sns['freq'])+0.09,1)+0.01, 0.1))
ax2.spines["left"].set_visible(False)
fig.legend(title='Hydro param spread dispersion', loc=(0.409,0.801))
fig.subplots_adjust(wspace=0.0)
return plt.show()
"""
Some basic statistical functions on the forecast realizations:
"""
def IQR(q075, q025):
return (q075-q025)
def MAD(arr):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med))
def YuleKendall(q025, q05, q075):
""" Yule-Kendall index: a robust and resistant alternative to the sample skewness
If the dataset is right-skewed the Yule-Kendall index is positive
"""
return ((q025 - 2*q05 + q075)/(q075 - q025))
def stat_quant_df(quantiles, ens_df):
"""
Same statistical functions but on quantiles
"""
quantiles_stat = pd.DataFrame(columns=['IQR', 'MAD', 'YuleKendall'], index=quantiles.index)
for i in quantiles.index:
quantiles_stat.IQR[i] = IQR(quantiles['0.75'][i], quantiles['0.25'][i])
quantiles_stat.MAD[i] = MAD(ens_df.loc[ens_df.index == i].drop('date', axis=1))
quantiles_stat.YuleKendall[i] = YuleKendall(quantiles['0.25'][i], quantiles['0.5'][i], quantiles['0.75'][i])
IQR_avg = np.mean(quantiles['0.75']) - np.mean(quantiles['0.25'])
spread_range_avg = np.mean(quantiles['1.0']) - np.mean(quantiles['0.0'])
quantiles_stat['date'] = quantiles['date']
print('The average IQR = <q_75> - <q_25> = %e' % IQR_avg)
print('The average range of spread = <q_100> - <q_0> = %e' % spread_range_avg )
return quantiles_stat
"""
Forecast verification tools: Brier score, Brier skill score (MUST BE CORRECTED FOR #ENSEMBLE MEMBERS!)
and plots, calculation of POD,FAR and POFD to plot the ROC curve
"""
def BS(ens,obs,y0,lead_time):
#y0: threshold value
#rename obs.index from 0 to 119 (lead time hours)
obs.index = range(len(obs))
#define obs as binary variable: o=1 if the event occured, o=0 if the event did not occur
o = obs*0
for k in obs.index[0:lead_time]:
if obs[k] >= y0[k]:
o[k] = 1
else:
o[k] = 0
j=0
y = np.zeros(len(ens))
#calculate the yk probability that the event was forecasted, as a probability among all different realizations
for i in ens.index:
for column in ens.columns[~ens.columns.isin(['date'])]: #drop the last column of dates
if ens[column][i] >= y0[i]: #if ensemble value higher than threshold
j=j+1
y[i] = j/len(ens.columns) #calculation of probability of threshold exceedance
j=0
n=len(ens.index)
return y,o,(1/n)*sum((y-o)**2)
def BS_plot(ens_df_runoff, rm_medians, obs_subset, y0, lead_times, plotting=True):
BSs_runoff_tot = pd.DataFrame(index = range(len(lead_times)), columns=['BS', 'lead_time [h]'])
BSs_runoff_met = pd.DataFrame(index = range(len(lead_times)), columns=['BS', 'lead_time [h]'])
for lead_time in lead_times:
BSs_runoff_tot['BS'][lead_time/24-1] = BS(ens_df_runoff, obs_subset.runoff, y0, lead_time)[2]
BSs_runoff_met['BS'][lead_time/24-1] = BS(rm_medians, obs_subset.runoff, y0, lead_time)[2]
BSs_runoff_tot['lead_time [h]'][lead_time/24-1] = lead_time
BSs_runoff_met['lead_time [h]'][lead_time/24-1] = lead_time
if (plotting == True):
fig, ax = plt.subplots(1, 1, figsize=(6,4), dpi=100)
plt.rcParams.update({'font.size': 13})
plt.scatter(BSs_runoff_tot['lead_time [h]'], BSs_runoff_tot['BS'], color='red', label='tot')
plt.scatter(BSs_runoff_met['lead_time [h]'], BSs_runoff_met['BS'], color='blue', label='met')
ax.xaxis.set_major_locator(plt.FixedLocator(locs=lead_times))
plt.grid(linestyle='--')
plt.xlabel('Lead times [h]')
plt.ylabel('BS');
plt.legend()
plt.title('Brier scores for threshold runoff > %i' % int(float(y0[0])) + ' m3 s-1'); #when considering e.g. median y0.name*100
return BSs_runoff_tot, BSs_runoff_met, plt.show()
def BSS(BS,obs,y0):
#reinitialize obs.index from 0 to 119 (lead time hours)
obs.index = range(len(obs))
o = obs*0
for k in obs.index:
if obs[k] >= y0[k]:
o[k] = 1
else:
o[k] = 0
o_avg = np.mean(o)
n=len(obs.index)
BSref = (1/n)*sum((o_avg-o)**2)
return o,o_avg, BSref, 1-BS/BSref
def BSS_plot(BSs_runoff_tot, BSs_runoff_met, obs_subset, y0, lead_times):
fig, ax = plt.subplots(1, 1, figsize=(6,4), dpi=100)
plt.rcParams.update({'font.size': 13})
plt.scatter(BSs_runoff_tot['lead_time [h]'], BSS(BSs_runoff_tot.BS, obs_subset.runoff,y0)[3],
color='red', label='tot')
plt.scatter(BSs_runoff_tot['lead_time [h]'], BSS(BSs_runoff_met.BS, obs_subset.runoff,y0)[3],
color='blue', label='met')
#plt.scatter(BSs_runoff_met['lead_time [h]'], BSs_runoff_met['BS'], color='blue', label='met')
#plt.scatter(BSs_runoff_hyd['lead_time [h]'], BSs_runoff_hyd['BS'], color='green', label='hyd')
ax.xaxis.set_major_locator(plt.FixedLocator(locs=lead_times))
plt.grid(linestyle='--')
plt.xlabel('Lead times [h]')
plt.ylabel('BSS');
plt.legend()
plt.title('Brier skill scores for threshold q%i' % int(float(y0[0])) + ' m3 s-1');
return plt.show()
def POD(realizations,obs,y0,lead_time):
# #threshold exceedance correctly forecasted:num
num = 0
# #threshold exceedance occured
den = 0
#change index to have both dfs with the same one:
obs.index = range(len(obs))
for i in obs.index[0:lead_time]:
if obs[i] >= y0[i]:
den = den+1
if realizations[i] >= y0[i]:
num = num+1
if den == 0:
den=1
return num/den
def FAR(realizations,obs,y0,lead_time):
# #false alarms
num = 0
# #forecasted threshold exceedances
den = 0
#change index to have both dfs with the same one:
obs.index = range(len(obs))
for i in obs.index[0:lead_time]:
if realizations[i] >= y0[i]:
den = den+1
if obs[i] < y0[i]:
num = num+1
if den == 0:
den=1
return num/den
def POFD(realizations,obs,y0,lead_time):
# #false alarms
num = 0
# #observed non-events
den = 0
#change index to have both dfs with the same one:
obs.index = range(len(obs))
for i in obs.index[0:lead_time]:
if obs[i] < y0[i]:
den = den+1
if realizations[i] >= y0[i]:
num = num+1
if den == 0:
den=1
return num/den
def PODs_FARs_POFDs(ens, obs, y0, lead_times, variable='runoff'):
#create dataframes with #rows given by the number of realizations in ens_df_runoff and #columns given by the lead times
PODs = pd.DataFrame(index = range(len(ens.columns[~ens.columns.isin(['date'])])),
columns=['24','48','72','96','120'])
FARs = pd.DataFrame(index = range(len(ens.columns[~ens.columns.isin(['date'])])),
columns=['24','48','72','96','120'])
POFDs = pd.DataFrame(index = range(len(ens.columns[~ens.columns.isin(['date'])])),
columns=['24','48','72','96','120'])
#different lead times: 1-5 days forecasts
for lead_time in [24,48,72,96,120]:
#different realizations, from 0 to 525
for column in ens.columns[~ens.columns.isin(['date'])]:
PODs[str(lead_time)][ens.columns.get_loc(column)] = POD(ens[column], obs[f'{variable}'], y0, lead_time)
FARs[str(lead_time)][ens.columns.get_loc(column)] = FAR(ens[column], obs[f'{variable}'], y0, lead_time)
POFDs[str(lead_time)][ens.columns.get_loc(column)] = POFD(ens[column], obs[f'{variable}'], y0, lead_time)
#sort all of the values in ascending order
PODs_sorted = PODs*0.0
FARs_sorted = PODs*0.0
POFDs_sorted = PODs*0.0
for column in PODs.columns:
PODs_sorted[column] = PODs[column].sort_values(ascending=True).values
FARs_sorted[column] = FARs[column].sort_values(ascending=True).values
POFDs_sorted[column] = POFDs[column].sort_values(ascending=True).values
return PODs_sorted, FARs_sorted, POFDs_sorted
def ROC_plot(PODs, FARs_or_POFDs, y0, xlabel, variable='runoff', title_text=''):
fig, ax = plt.subplots(1, 1, figsize=(7,7))
plt.rcParams.update({'font.size': 14})
jet= plt.get_cmap('rainbow')
colors = iter(jet(np.linspace(0,1,5)))
for column in PODs.columns:
color=next(colors)
#xx,yy to calculate ROCa
xx = np.concatenate((np.array([0.0]), FARs_or_POFDs[column].values, np.array([1.0])))
yy = np.concatenate((np.array([0.0]), PODs[column].values, np.array([1.0])))
ax.plot([0,FARs_or_POFDs[column][0]],[0,PODs[column][0]], '.-', lw=2, markersize=8, color=color)
ax.plot(FARs_or_POFDs[column],PODs[column],'.-', lw=2, markersize=8,
color=color, label=(f'{int(column)} h {auc(xx,yy):.4}'))
ax.plot([FARs_or_POFDs[column][-1:],1],[PODs[column][-1:],1], '.-', lw=2, markersize=8, color=color)
ax.hlines(y=1.0, xmin=-0.05, xmax=1, linewidth=1, color='black', linestyle='--')
ax.plot([0,1],[0,1], linewidth=1, color='black', linestyle='--')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.xlim(-0.025,1.025)
plt.ylim(-0.025,1.025)
plt.xlabel(xlabel)
plt.ylabel('POD')
plt.legend(title='Lead times - ROCa',fontsize=13, loc='lower right', frameon=True)
if variable == 'precipitation':
units = 'mm'
else:
units = 'm3 s-1'
plt.title(f'ROC curve ' + title_text + f'for {variable} threshold > {int(float(y0[0]))} {units}'); #when considering e.g. median y0.name*100
return plt.show()
def rank_histogram(ens, obs, title_text, realizations_size=525, ranks_number=21, member_extracted=(9,11)):
"""
Plot the verification rank histogram to check on forecast consistency
"""
obs.index = range(len(obs))
#condition on ranks_number: if 25 we are considering pin members that goes [1:25], otherwise ens members that goes
#[0:20] for medians or [0:525] considering all possible realizations
if ranks_number == 25:
df_rank = pd.DataFrame(index = range(1,ranks_number+1), columns=['rank','pin_member'])
else:
df_rank = pd.DataFrame(index = range(ranks_number), columns=['rank','ens_member'])
#initialize ranks with all zeros
df_rank['rank'][:] = 0
for i in obs.index:
#consider all possible ensemble realizations and obs at the same time
df = ens.loc[i]
#merge obs value to dataframe
df.loc['obs'] = obs.loc[i]
#sort all values ascendingly
df = df.sort_values(ascending=True)
#create new dataframe with new index (range(0,526)) and with a column with ensemble names and obs
members = df.index
new_index = range(len(df))
df = pd.DataFrame(df)
df['members'] = members
df.index = new_index
#extract obs row in dataframe
obs_merged = df.loc[df['members'] == 'obs']
#if conditions to account for cases when obs is at the beginning or end of df
if (obs_merged.index == 0):
nearest = df.loc[obs_merged.index+1]
if (obs_merged.index == realizations_size):
nearest = df.loc[obs_merged.index-1]
elif ((obs_merged.index != 0) and (obs_merged.index != realizations_size)):
#select the two nearest element to obs (general case)
obs_near = df.loc[df.loc[df['members'] == 'obs'].index-1 | df.loc[df['members'] == 'obs'].index |
df.loc[df['members'] == 'obs'].index+1]
nearest = obs_near.iloc[(obs_near[i]-obs_near[i].loc[df['members'] == 'obs']).abs().argsort()[:1]]
#extract ensemble member from nearest i.e. # bin associated to histogram
rank_point=int(str(nearest['members'])[member_extracted[0]:member_extracted[1]]) #[9:11] for 525 realizations
#add the rank point to the correspondent element in df rank
df_rank['rank'][rank_point] = df_rank['rank'][rank_point] + 1
if ranks_number == 25:
df_rank['pin_member'] = range(1,ranks_number+1)
ens_or_pin_column = df_rank['pin_member']
else:
df_rank['ens_member'] = range(ranks_number)
ens_or_pin_column = df_rank['ens_member']
#plotting the histogram:
fig, ax = plt.subplots(1, 1, figsize=(7,4), dpi=100)
plt.rcParams.update({'font.size': 13})
plt.bar(ens_or_pin_column, df_rank['rank']/120, width=0.95);
ax.xaxis.set_major_locator(plt.FixedLocator(locs=ens_or_pin_column))
ax.tick_params(axis='both', labelsize=10)
plt.ylabel('Frequency')
plt.xlabel('Ensemble member');
plt.title('Verification rank histogram'+title_text);
return df_rank, plt.show()
def correlation_plot(y0, obs_subset, lead_times, title_text):
"""
Plot the correlation between e.g. the median of the realizations and the observation at different lead times,
report in the legend the values of r2 for every different lead time considered
"""
plt.figure(figsize=(8,8))
plt.rcParams.update({'font.size': 15})
#set of colors for different lead times
colors = ['#242989','#445BA6','#658BCF','#87BAFB','#A8E9FF']
ncolor = 0
obs_subset.index = range(len(obs_subset))
for lead_time in lead_times:
#compute the fit between obs and forecast
X = obs_subset['runoff'][0:lead_time]
y = y0[0:lead_time]
X = sm.add_constant(X)
model = sm.OLS(y, X)
results = model.fit()
for k in obs_subset.index[0:lead_time]:
plt.plot(obs_subset['runoff'][k], y0[k], 'o',markersize=10, color=colors[ncolor],
alpha = 1, zorder=1/(ncolor+1), label='%i' % lead_time + ' h, R$^2$ = %f' % (results.rsquared))
ncolor+=1
plt.plot([-10, max(obs_subset.runoff+10)],[-10,max(y0)+10], linewidth=1, color='black',
linestyle='--')
plt.xlabel('Observed runoff [m3 s$^{-1}$]')
plt.ylabel('Forecast median runoff [m3 s$^{-1}$]');
plt.xlim(-5, max(obs_subset.runoff+10))
plt.ylim(-5, max(y0+10));
#print legend without repetions of labels
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc='lower right', numpoints = 1);
plt.title('Correlation plot'+title_text)
return(plt.show())
def peak_box(ens, obs, all_dates_hours, sim_start, title_text='all realizations', A=186):
"""
Plot the peak-box approach for the group of runoff realizations considered together with observation:
find the peak for every realization in the entire temporal domain, find out the first and the last one happening in time
and the ones with highest and lowest magnitudes, plot the peak-box, find the IQR box from all the peaks and timings
and plot it, find the peak and timing medians and plot it.
Calculate the full and IQR sharpness of the forecasts and the deviations of observation peak from the peak represented
by peak and timing median.
"""
fig, ax = plt.subplots(1, 1, figsize=(10,6), dpi=100)
plt.title('Peak-box approach for '+title_text+ ' for initialization ' + sim_start)
#dataframe containing the values of peak discharges for every realization
df_max_runoff = pd.DataFrame(index=(ens.columns[~ens.columns.isin(['date'])]),
columns=['max', 'date', 'hour'])
for member in ens.columns[~ens.columns.isin(['date'])]:
# Find all the local peak maximums for every realization, excluding borders of x domain (hour 0 and hour 120)
peaks = find_peaks(ens[member][1:-1], height=0)
# Select the maximum value of the peaks found and find its date and hour associated
df_max_runoff['max'][member] = max(peaks[1]['peak_heights'])
df_max_runoff['date'][member] = ens['date'][1:-1].loc[ens[member][1:-1] == df_max_runoff['max'][member]].iloc[0]
df_max_runoff['hour'][member] = int(ens.loc[ens['date'] == df_max_runoff['date'][member]].index.values)
ax.plot(ens.date, ens[member], color='#32AAB5', linewidth=0.5)
ax.plot(df_max_runoff.date[member], df_max_runoff['max'][member], 'o',markersize=5, color='blue', alpha=0.15,
zorder=1000)
#observation
l2 = ax.plot(obs.date, obs.runoff, linewidth=2, label='Runoff obs', color='orange')
#observation peak:
# Find all the local peak maximums for obs, excluding borders of x domain (hour 0 and hour 120)
peaks_obs = find_peaks(obs.runoff[1:-1], height=0)
max_peak = max(peaks_obs[1]['peak_heights'])
l3 = ax.plot(obs.date.loc[obs.runoff == max_peak], max_peak, 'o', markersize=8, color='red',
alpha=0.8, zorder=1001, label='($t_{obs}$, $p_{obs}$)')
#report all peak and timing(hour) and correspondent dates quantiles in a dataframe
peaks_timings = pd.DataFrame(index=range(5), columns=['peak', 'timing', 'date'])
peaks_timings['peak'] = mquantiles(df_max_runoff['max'], prob=[0.0,0.25,0.5,0.75,1.0])
peaks_timings['timing'] = mquantiles(df_max_runoff.hour, prob=[0.0,0.25,0.5,0.75,1.0]).astype(int)
for i in range(5):
peaks_timings['date'][i] = str(all_dates_hours['date'].loc[all_dates_hours['hour'] ==
peaks_timings['timing'][i]].iloc[0])
"""
Peak-Box (outer rectangle):
"""
#the lower left coordinate set to the earliest time when a peak flow occurred in the available ensemble members (t0)
#and the lowest peak discharge of all members during the whole forecast period (p0)
lower_left_pb = [peaks_timings['date'][0], peaks_timings['peak'][0]]
#upper right coordinate set to the latest time when a peak flow occurred in the available ensemble members (t100)
#and the highest peak discharge of all members during the whole forecast period (p100)
upper_right_pb = [peaks_timings['date'][4], peaks_timings['peak'][4]]
alpha=0.5
color='blue'
lw=2
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(lower_left_pb[0])],
[lower_left_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw)
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(upper_right_pb[0])],
[lower_left_pb[1], lower_left_pb[1]], color=color, alpha=alpha, lw=lw)
plt.plot([pd.to_datetime(upper_right_pb[0]), pd.to_datetime(upper_right_pb[0])],
[lower_left_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw)
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(upper_right_pb[0])],
[upper_right_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw)
"""
IQR-box (inner rectangle):
"""
#lower left coordinate set to the 25% quartile of the peak timing (t25)
#and the 25% quartile of the peak discharges of all members during the whole forecast period (p25)
lower_left_IQRbox = [peaks_timings['date'][1], peaks_timings['peak'][1]]
#[str(all_dates_hours['date'].loc[all_dates_hours['hour'] == int(df_max_quantiles_timing[1])].iloc[0]),
#mquantiles(df_max_runoff['max'], prob=[0.0,0.25,0.5,0.75,1.0])[1]]
#upper right coordinate of the IQR-Box is defined as the 75% quartile of the peak timing (t75)
#and the 75% quartile of the peak discharges of all members (p75)
upper_right_IQRbox = [peaks_timings['date'][3], peaks_timings['peak'][3]]
#[str(all_dates_hours['date'].loc[all_dates_hours['hour'] == int(df_max_quantiles_timing[3])].iloc[0]),
# mquantiles(df_max_runoff['max'], prob=[0.0,0.25,0.5,0.75,1.0])[3]]
plt.plot([pd.to_datetime(lower_left_IQRbox[0]), pd.to_datetime(lower_left_IQRbox[0])],
[lower_left_IQRbox[1], upper_right_IQRbox[1]], color=color, alpha=alpha, lw=lw)
plt.plot([pd.to_datetime(lower_left_IQRbox[0]), pd.to_datetime(upper_right_IQRbox[0])],
[lower_left_IQRbox[1], lower_left_IQRbox[1]], color=color, alpha=alpha, lw=lw)
plt.plot([pd.to_datetime(upper_right_IQRbox[0]), pd.to_datetime(upper_right_IQRbox[0])],
[lower_left_IQRbox[1], upper_right_IQRbox[1]], color=color, alpha=alpha, lw=lw)
plt.plot([pd.to_datetime(lower_left_IQRbox[0]), pd.to_datetime(upper_right_IQRbox[0])],
[upper_right_IQRbox[1], upper_right_IQRbox[1]], color=color, alpha=alpha, lw=lw)
"""
Median of the peak discharge:
"""
#horizontal line going from t0 to t100 representing the median of the peak discharge (p50)
#of all members of the ensemble forecast
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(upper_right_pb[0])],
[peaks_timings['peak'][2], peaks_timings['peak'][2]], color=color, alpha=alpha, lw=lw)
"""
Median of the peak timing:
"""
#vertical line going from p0 to p100 representing the median of the peak timing (t50)
plt.plot([pd.to_datetime(peaks_timings['date'][2]), pd.to_datetime(peaks_timings['date'][2])],
[lower_left_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw)
ax.grid(True)
#y axis limits
#ax.set_ylim([0,500])
#x axis ticks and limits
days = mdates.DayLocator()
hours = mdates.HourLocator()
yearsFmt = mdates.DateFormatter('%Y-%m-%d') # %H:%M')
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(hours)
# min and max on x axis
datemin = np.datetime64(ens.date[0], 'm') - np.timedelta64(60, 'm')
datemax = np.datetime64(ens.date[119], 'm') + np.timedelta64(25, 'm')
ax.set_xlim(datemin, datemax)
runoff_member = ax.plot(ens.date, ens[ens.columns[0]], color='#32AAB5',
linewidth=0.5, label='Runoff member')
peak_member = ax.plot(df_max_runoff.date[member], df_max_runoff['max'][member], 'o',markersize=5, color='blue',
alpha=0.3, zorder=1000, label='($t_i$, $p_i$), $i \in $#realizations ')
median_peak = ax.plot(ens.date.loc[ens.date == peaks_timings['date'][2]],
peaks_timings['peak'][2], '*', markersize=15, color='red',
alpha=10, zorder=1001, label='($t_{50}$, $p_{50}$)')
fig.legend(handles=[runoff_member[0], l2[0], peak_member[0], median_peak[0], l3[0]], ncol=1, numpoints = 1,
labels=['Runoff member', 'Runoff obs', '($t_i$, $p_i$), $i \in $#realizations', '($t_{50}$, $p_{50}$)',
'($t_{obs}$, $p_{obs}$)'], loc=(0.66,0.66));
plt.rcParams.update({'font.size': 10});
"""
Sharpness of the forecast:
PB_full = (p100-p0)(t100-t0)*3.6/A with A the area of the basin in km2
PB_IQR = (p75-p25)(t75-t25)*3.6/A
"""
PB_full = ((peaks_timings['peak'][4] - peaks_timings['peak'][0])*
(peaks_timings['timing'][4] - peaks_timings['timing'][0])*3.6/A)
PB_IQR = ((peaks_timings['peak'][3] - peaks_timings['peak'][1])*
(peaks_timings['timing'][3] - peaks_timings['timing'][1])*3.6/A)
"""
Verification of peak median vs obs:
Dpeak = |p50-pobs|
Dtime = |t50-tobs|
"""
D_peak = abs(peaks_timings['peak'][2] - max(obs.runoff))
D_time = abs(peaks_timings['timing'][2] - int(obs.runoff.loc[obs.runoff == max(obs.runoff)].index.values))
return (plt.show(), print(f'PBfull = {PB_full:.5} mm'), print(f'PBiqr = {PB_IQR:.5} mm'),
print(f'Dpeak = {D_peak:.5} m3 s-1'), print(f'Dtime = {D_time} h'))
def peak_box_multipeaks(ens, obs, all_dates_hours, sim_start, title_text='all realizations', delta_t=10, gamma=3.0/5.0,
decreas_hours=8, beta = 1.0/4.0, A=186):
"""
Plot the peak-box approach for the group of runoff realizations considered together with observation:
find the peak for every realization in the entire temporal domain, find out the first and the last one happening in time
and the ones with highest and lowest magnitudes, plot the peak-box, find the IQR box from all the peaks and timings
and plot it, find the peak and timing medians and plot it.
Calculate the full and IQR sharpness of the forecasts and the deviations of observation peak from the peak represented
by peak and timing median.
"""
"""
MULTI-PEAKS APPROACH: deeveloped for sim_start = '2018-10-27 12:00:00'
1 - Know all the peaks presented by a realization
2 - Decide a criteria to consider just "relevant" peaks i.e. peaks that can be associated to different events
3 - Based on the remained peaks regroup them considering all realizations and procced in drawing the boxes
"""
# Implement the division of peaks for all realizations considered:
# dictionary to contain all the event peaks for different ens members
peaks_dict = lambda: defaultdict(peaks_dict)
event_peaks_dict = peaks_dict()
# dictionary to contain the decrescency boolean values for every realization
decreas_dict = lambda: defaultdict(decreas_dict)
decreas = decreas_dict()
count_trues_array = np.zeros(120)
for member in ens.columns[~ens.columns.isin(['date'])]:
#Find all the local peak maximums, excluding borders of x domain (hour 0 and hour 119)
peaks = find_peaks(ens[member][1:-1], height=0)
peak_date = pd.DataFrame(index=range(len(peaks[1]['peak_heights'])), columns=['date'])
for p in range(len(peaks[1]['peak_heights'])):
peak_date['date'][p] = ens['date'][1:-1].loc[ens[member][1:-1] == peaks[1]['peak_heights'][p]].iloc[0]
# DECIDE A CRITERIA TO KEEP JUST THE "IMPORTANT" PEAKS:
# must take into consideration the behaviour of the function i.e. how much it increases/decreases between two peaks
# and also the amount of time to consider to distinguish between different events
# empty dataframe to contain so-called event peaks i.e. the relatively important peaks associated to events
event_peaks = pd.DataFrame(index=range(120),columns=['hour','date', 'peak'])
# delta timing to consider the behaviour of the realization: consider the previous and the next delta_t hours to keep a peak or not,
# if in the next delta_t hours the function decreases at least 2/5 (i.e. 1-gamma) since the peak value -> keep the peak
#delta_t = 10 #hours
#gamma = 3.0/5.0
# look if the amount of discharge decreases at least 1-gamma after the peak value before increasing again:
n_event = 0
for p in range(len(peaks[1]['peak_heights'])):
for k in range(-delta_t, delta_t):
# if conditions: - must not go beyond the 120 hours limit and before the beginning at 0 hours,
# - the function must decrease after the peak
# - at least one of the delta_t point after/before the peak must be lower of 1-gamma: 2/5? (1/3 ? must tune for the
# right number) the value of the peak
if (peaks[0][p]+k > 0) and (peaks[0][p]+k < 120) and (ens[member][peaks[0][p]+2] < ens[member][peaks[0][p]+1]) and (ens[member][peaks[0][p]+k] < ens[member][peaks[0][p]+1]*gamma) :
event_peaks['hour'][n_event] = peaks[0][p]+1
event_peaks['date'][n_event] = ens['date'][1:-1].loc[ens[1:-1].index == event_peaks['hour'][n_event]].iloc[0]
event_peaks['peak'][n_event] = ens[member][peaks[0][p]+1]
n_event = n_event+1
break
#keep just the rows with peaks
event_peaks = event_peaks[pd.notnull(event_peaks['peak'])]
# for loop to keep just one peak if other peaks are very near (+- 7 hours?):
while True:
# "save" old index to compare it with the new one at the end when some peak are withdrawn
old_event_peaks_index = event_peaks.index
for i,j in zip(event_peaks.index, event_peaks.index+1):
# conditions to avoid problems when considering the last peak of the domain
if (i == event_peaks.index[-1] + 1) or (j == event_peaks.index[-1] + 1):
break
#condition to discard very near in time peaks with very similar values:
if (event_peaks.hour[i] >= event_peaks.hour[j] - 7): #or (event_peaks.hour[i] <= event_peaks.hour[j] + 4):
# condition to keep the highest peak between the two near peaks considered:
if event_peaks['peak'][j] > event_peaks['peak'][i]:
event_peaks = event_peaks.drop(event_peaks.index[i])
elif event_peaks['peak'][j] < event_peaks['peak'][i]:
event_peaks = event_peaks.drop(event_peaks.index[j])
event_peaks.index = range(len(event_peaks))
# if condition to keep the length of the index correct: if old index and new index lengths are equal exit the while loop
if len(old_event_peaks_index) == len(event_peaks.index):
break
# write all the event peaks obtained in a dictionary for different members:
event_peaks_dict[member] = event_peaks
# NOW: must seek a criteria to split all the peaks found by groups related to different runoff maxima events.
# 1st approach: look if the majority of the realizations decrease altogether in a certain temporal window:
# for every realizations check if for every hour timestep the next 10? hours (=decreas_hour) decreases from that value
# then check every realization for every hour timestep (120x525 matrix) and if for a specific timestep
# at least 2/3? of the realizations show decreasing behaviour split the domain among peaks
decreas[member] = np.array(range(120), dtype=bool)
#decreas_hours = 8
for h in range(120):
if all(x > y for x, y in zip(ens[member][h:h+decreas_hours], ens[member][h+1:h+decreas_hours+1])):
decreas[member][h] = True
else:
decreas[member][h] = False
#count for every hour the amount of Trues i.e. how many realizations show a decreasing behaviour for the next decreas_hours
for h in range(120):
if decreas[member][h] == True:
count_trues_array[h] = count_trues_array[h] + 1
peak_groups_dict = lambda: defaultdict(peak_groups_dict)
peak_groups = peak_groups_dict()
#initialize the splitting_hour list with the first element given by the 0th hour (i.e. the first group start from the beginning of the
#time domain)
splitting_hour = []
splitting_hour.append(0)
#decreasing parameter: the amount of realizations that show the decreasing behaviour
#beta = 1.0/4.0
for h in range(120):
# condition to divide all the peaks in different groups:
# if at least beta of all realizations after a peak are decreasing for at least decreas_hours -> splitting
if count_trues_array[h] >= len(ens.columns[~ens.columns.isin(['date'])])*beta :
# add the splitting hour found to the splitting_hour list
splitting_hour.append(h)
# write in peak_groups dictionary all the peaks for every different realizations that stay between two splitting hours
for member in ens.columns[~ens.columns.isin(['date'])]:
for peak_hour in event_peaks_dict[member]['hour']:
if peak_hour <= splitting_hour[-1]:
peak_groups[splitting_hour[-1]][member] = event_peaks_dict[member].loc[(event_peaks_dict[member]['hour'] > splitting_hour[-2]) &
(event_peaks_dict[member]['hour'] < splitting_hour[-1])]
# conditions to drop all the empty groups from peak_groups (must check if correct!):
# if all the dataframes of one group are empty -> delete group
for group in list(peak_groups):
if all(peak_groups[group][member].empty for member in peak_groups[group].keys()):
#remove empty groups
peak_groups.pop(group)
# if more than 8.5/10 (15%) of the dataframes of a group are empty -> remove group???
for group in list(peak_groups):
empty_dataframes = 0
for member in peak_groups[group].keys():
if peak_groups[group][member].empty :
empty_dataframes = empty_dataframes + 1
if (empty_dataframes >= len(peak_groups[group].keys())*8.5/10.0):
peak_groups.pop(group)
# if in a group an element is not a dataframe (dont know why?!) remove that element:
for group in list(peak_groups):
for member in peak_groups[group].keys():
if (isinstance(peak_groups[group][member], pd.DataFrame) == False) :
peak_groups[group].pop(member)
# OBSERVATION PEAKS:
# apply the same procedure as before to distinguish peaks related to different events:
#reset obs index
obs = obs.reset_index()
#Find all the local peak maximums for obs, excluding borders of x domain (hour 0 and hour 120)
OBSpeaks = find_peaks(obs.runoff[1:-1], height=0)
OBSpeak_date = pd.DataFrame(index=range(len(OBSpeaks[1]['peak_heights'])), columns=['date'])
for p in range(len(OBSpeaks[1]['peak_heights'])):
OBSpeak_date['date'][p] = obs['date'][1:-1].loc[obs['runoff'][1:-1] == OBSpeaks[1]['peak_heights'][p]].iloc[0]
# empty dataframe to contain so-called event peaks i.e. the relatively important peaks associated to events
OBSevent_peaks = pd.DataFrame(index=range(120),columns=['hour','date', 'peak'])
# delta timing to consider the behaviour of the realization: consider the previous and the next delta_t hours to keep a peak or not,
# if in the next delta_t hours the function decreases at least 1/3 since the peak value -> keep the peak
#delta_t = 10 #hours
# look if the amount of discharge decreases at least 1/3 after the peak value before increasing again:
n_event = 0
for p in range(len(OBSpeaks[1]['peak_heights'])):
for k in range(-delta_t, delta_t):
# if conditions: - must not go beyond the 120 hours limit and before the beginning at 0 hours,
# - the function must decrease after the peak
# - at least one of the delta_t point after the peak must be lower of 2/5 (1/3 ? must tune for the
# right number) the value of the peak
if (OBSpeaks[0][p]+k > 0) and (OBSpeaks[0][p]+k < 120) and (obs.runoff[OBSpeaks[0][p]+2] < obs.runoff[OBSpeaks[0][p]+1]) and (obs.runoff[OBSpeaks[0][p]+k] < obs.runoff[OBSpeaks[0][p]+1]*gamma) :
#print(p)
OBSevent_peaks['hour'][n_event] = OBSpeaks[0][p]+1
OBSevent_peaks['date'][n_event] = obs['date'][1:-1].loc[ens[1:-1].index == OBSevent_peaks['hour'][n_event]].iloc[0]
OBSevent_peaks['peak'][n_event] = obs.runoff[OBSpeaks[0][p]+1]
n_event = n_event+1
break
#keep just the rows with peaks
OBSevent_peaks = OBSevent_peaks[pd.notnull(OBSevent_peaks['peak'])]
# for loop to keep just one peak if other peaks are very near (+- 7 hours?):
while True:
# "save" old index to compare it with the new one at the end when some peak are withdrawn
OBSold_event_peaks_index = OBSevent_peaks.index
for i,j in zip(OBSevent_peaks.index, OBSevent_peaks.index+1):
# conditions to avoid problems when considering the last peak of the domain
if (i == OBSevent_peaks.index[-1] + 1) or (j == OBSevent_peaks.index[-1] + 1):
break
#condition to discard very near in time peaks with very similar values:
if (OBSevent_peaks.hour[i] >= OBSevent_peaks.hour[j] - 7): #or (event_peaks.hour[i] <= event_peaks.hour[j] + 4):
# condition to keep the highest peak between the two near peaks considered:
if OBSevent_peaks['peak'][j] > OBSevent_peaks['peak'][i]:
OBSevent_peaks = OBSevent_peaks.drop(OBSevent_peaks.index[i])
elif OBSevent_peaks['peak'][j] < OBSevent_peaks['peak'][i]:
OBSevent_peaks = OBSevent_peaks.drop(OBSevent_peaks.index[j])
OBSevent_peaks.index = range(len(OBSevent_peaks))
# if condition to keep the length of the index correct: if old index and new index lengths are equal exit the while loop
if len(OBSold_event_peaks_index) == len(OBSevent_peaks.index):
break
# PLOT:
# plot all peaks in different groups
#jet= plt.get_cmap('tab10')
#colors = iter(jet(np.linspace(0,len(peak_groups.keys()),5)))
colors = itertools.cycle(["#e60000", "#0000e6", "#e6e600", "#bf00ff", "#009933", "#b35900"])
fig, ax = plt.subplots(1, 1, figsize=(10,6), dpi=100)
plt.title('Peak-box approach for initialization ' + sim_start)
for member in ens.columns[~ens.columns.isin(['date'])]:
runoff_member = ax.plot(ens.date, ens[member], color='#32AAB5', linewidth=0.5, alpha=0.65)
for group in peak_groups.keys():
color = next(colors)
for member in peak_groups[group].keys():
peak_member = ax.plot(peak_groups[group][member]['date'], peak_groups[group][member]['peak'],'o',markersize=2, color=color,
alpha=0.5, zorder=10)
#observation series plot
l2 = ax.plot(obs.date, obs.runoff, linewidth=2, label='Runoff obs', color='orange', zorder = 15)
#observation peaks plot
for OBSpeak in OBSevent_peaks.index:
peak_obs = ax.plot(OBSevent_peaks['date'][OBSpeak], OBSevent_peaks['peak'][OBSpeak],'*',markersize=20, color='orange',
markeredgecolor='black', markeredgewidth=1.5, alpha=1, zorder=100)
# NOW: develop peak boxes for every different group:
"""
Peak-Box (outer rectangle):
IQR-box (inner rectangle):
Median of the peak discharge:
Median of the peak timing:
"""
#lower_left_pb = pd.DataFrame(index=range(len(peak_groups.keys())))
#upper_right_pb = pd.DataFrame(index=range(len(peak_groups.keys())))
peak_legend = pd.DataFrame(index=range(len(peak_groups.keys())))
median_legend = pd.DataFrame(index=range(len(peak_groups.keys())))
#jet= plt.get_cmap('tab10')
colors = itertools.cycle(["#e60000", "#0000e6", "#e6e600", "#bf00ff", "#009933", "#b35900"])#iter(jet(np.linspace(0,len(peak_groups.keys()),5)))
for group in peak_groups.keys():
color = next(colors)
# empty arrays to contain all the dates/peaks for every different realization of one specific group
all_dates_of_group = []
all_hours_of_group = []
all_peaks_of_group = []
# write all dates, hours and peaks for every possible realizations for every group in peak_groups
for member in peak_groups[group].keys():
for date in peak_groups[group][member]['date']:
all_dates_of_group.append(str(date))
for peak in peak_groups[group][member]['peak']:
all_peaks_of_group.append(peak)
for hour in peak_groups[group][member]['hour']:
all_hours_of_group.append(hour)
# PEAK-BOX:
#the lower left coordinate set to the earliest time when a peak flow occurred in the available ensemble members (t0)
#and the lowest peak discharge of all members during the whole forecast period (p0)
lower_left_pb = [min(all_dates_of_group), min(all_peaks_of_group)]
#upper right coordinate set to the latest time when a peak flow occurred in the available ensemble members (t100)
#and the highest peak discharge of all members during the whole forecast period (p100)
upper_right_pb = [max(all_dates_of_group), max(all_peaks_of_group)]
#plot the peak-boxes
alpha=0.75
lw=2
zorder = 20
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(lower_left_pb[0])],
[lower_left_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw, zorder=zorder)
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(upper_right_pb[0])],
[lower_left_pb[1], lower_left_pb[1]], color=color, alpha=alpha, lw=lw, zorder=zorder)
plt.plot([pd.to_datetime(upper_right_pb[0]), pd.to_datetime(upper_right_pb[0])],
[lower_left_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw, zorder=zorder)
plt.plot([pd.to_datetime(lower_left_pb[0]), pd.to_datetime(upper_right_pb[0])],
[upper_right_pb[1], upper_right_pb[1]], color=color, alpha=alpha, lw=lw, zorder=zorder)
# IQR-BOX:
#calculate the quantiles of peaks and timings and convert timings in dates
peaks_quantiles = mquantiles(all_peaks_of_group, prob=[0.0,0.25,0.5,0.75,1.0])
hours_quantiles = mquantiles(sorted(all_hours_of_group), prob=[0.0,0.25,0.5,0.75,1.0]).astype(int)
dates_quantiles = ['']*5
for i in range(5):
dates_quantiles[i] = str(all_dates_hours['date'].loc[all_dates_hours['hour'] ==
hours_quantiles[i]].iloc[0])
#lower left coordinate set to the 25% quartile of the peak timing (t25)
#and the 25% quartile of the peak discharges of all members during the whole forecast period (p25)
lower_left_IQRbox = [dates_quantiles[1], peaks_quantiles[1]]
#upper right coordinate of the IQR-Box is defined as the 75% quartile of the peak timing (t75)
#and the 75% quartile of the peak discharges of all members (p75)
upper_right_IQRbox = [dates_quantiles[3], peaks_quantiles[3]]
plt.plot([pd.to_datetime(lower_left_IQRbox[0]), pd.to_datetime(lower_left_IQRbox[0])],
[lower_left_IQRbox[1], upper_right_IQRbox[1]], color=color, alpha=alpha, lw=lw, zorder=zorder)
plt.plot([pd.to_datetime(lower_left_IQRbox[0]), pd.to_datetime(upper_right_IQRbox[0])],
[lower_left_IQRbox[1], lower_left_IQRbox[1]], color=color, alpha=alpha, lw=lw, zorder=zorder)
plt.plot([pd.to_datetime(upper_right_IQRbox[0]), | pd.to_datetime(upper_right_IQRbox[0]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
from numpy import nan, float64
from jqfactor_analyzer.prepare import get_clean_factor_and_forward_returns
from jqfactor_analyzer.performance import (
factor_information_coefficient,
factor_autocorrelation,
mean_information_coefficient,
quantile_turnover,
factor_returns, factor_alpha_beta,
average_cumulative_return_by_quantile
)
from jqfactor_analyzer.utils import get_forward_returns_columns
dr = pd.date_range(start='2015-1-1', end='2015-1-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = pd.DataFrame(index=dr,
columns=tickers,
data=[[1, 2, 3, 4],
[4, 3, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = pd.DataFrame()
factor_data['factor'] = factor
factor_data['group'] = pd.Series(index=factor.index,
data=[1, 1, 2, 2, 1, 1, 2, 2],)
factor_data['weights'] = pd.Series(range(8), index=factor.index,
dtype=float64) + 1
@pytest.mark.parametrize(
('factor_data', 'forward_returns', 'group_adjust',
'by_group', 'expected_ix', 'expected_ic_val'),
[(factor_data, [4, 3, 2, 1, 1, 2, 3, 4], False, False, dr, [-1., -1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, False, dr, [1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True,
pd.MultiIndex.from_product([dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], True, True,
pd.MultiIndex.from_product([dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.])]
)
def test_information_coefficient(factor_data,
forward_returns,
group_adjust,
by_group,
expected_ix,
expected_ic_val):
factor_data = factor_data.copy()
factor_data['period_1'] = pd.Series(index=factor_data.index,
data=forward_returns)
ic = factor_information_coefficient(factor_data=factor_data,
group_adjust=group_adjust,
by_group=by_group)
expected_ic_df = pd.DataFrame(index=expected_ix,
columns=pd.Index(['period_1'], dtype='object'),
data=expected_ic_val)
pd.testing.assert_frame_equal(ic, expected_ic_df)
@pytest.mark.parametrize(
(
'factor_data', 'forward_returns', 'group_adjust',
'by_group', 'by_time', 'expected_ix', 'expected_ic_val'
), [
(factor_data, [4, 3, 2, 1, 1, 2, 3, 4], False, False, 'D',
dr, [-1., -1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, False, 'W',
pd.DatetimeIndex(['2015-01-04'], name='date', freq='W-SUN'), [1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True, None,
pd.Int64Index([1, 2], name='group'), [1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True, 'W',
pd.MultiIndex.from_product(
[pd.DatetimeIndex(['2015-01-04'], name='date', freq='W-SUN'),
[1, 2]],
names=['date', 'group']
),
[1., 1.])
]
)
def test_mean_information_coefficient(factor_data,
forward_returns,
group_adjust,
by_group,
by_time,
expected_ix,
expected_ic_val):
factor_data = factor_data.copy()
factor_data['period_1'] = pd.Series(index=factor_data.index,
data=forward_returns)
ic = mean_information_coefficient(factor_data,
group_adjust=group_adjust,
by_group=by_group,
by_time=by_time)
expected_ic_df = pd.DataFrame(index=expected_ix,
columns=pd.Index(['period_1']),
data=expected_ic_val)
pd.testing.assert_frame_equal(ic, expected_ic_df,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize(
('quantile_values', 'test_quantile', 'expected_vals'),
[([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
4.0,
[nan, 1.0, 1.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
3.0,
[nan, 0.0, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0]],
2.0,
[nan, 1.0, 1.0, 1.0])]
)
def test_quantile_turnover(quantile_values, test_quantile,
expected_vals):
dr = pd.date_range(start='2015-1-1', end='2015-1-4')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
quantized_test_factor = pd.Series(
pd.DataFrame(index=dr, columns=tickers, data=quantile_values).stack()
)
quantized_test_factor.index = quantized_test_factor.index.set_names(
['date', 'asset']
)
to = quantile_turnover(quantized_test_factor, test_quantile)
expected = pd.Series(
index=quantized_test_factor.index.levels[0], data=expected_vals)
expected.name = test_quantile
pd.testing.assert_series_equal(to, expected)
@pytest.mark.parametrize(
('factor_data', 'factor_vals', 'fwd_return_vals',
'group_adjust', 'expected_vals'),
[(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], [4, 3, 2, 1, 1, 2, 3, 4],
False, [-1.25000, -1.25000]),
(factor_data, [1, 1, 1, 1, 1, 1, 1, 1], [4, 3, 2, 1, 1, 2, 3, 4],
False, [0.0, 0.0]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], [4, 3, 2, 1, 1, 2, 3, 4],
True, [-0.5, -0.5]),
(factor_data, [1, 2, 3, 4, 1, 2, 3, 4], [1, 4, 1, 2, 1, 2, 2, 1],
True, [1.0, 0.0]),
(factor_data, [1, 1, 1, 1, 1, 1, 1, 1], [4, 3, 2, 1, 1, 2, 3, 4],
True, [0.0, 0.0])]
)
def test_factor_returns(factor_data,
factor_vals,
fwd_return_vals,
group_adjust,
expected_vals):
factor_data = factor_data.copy()
factor_data['period_1'] = fwd_return_vals
factor_data['factor'] = factor_vals
factor_returns_s = factor_returns(factor_data=factor_data,
demeaned=True,
group_adjust=group_adjust)
expected = pd.DataFrame(
index=dr,
data=expected_vals,
columns=get_forward_returns_columns(factor_data.columns)
)
pd.testing.assert_frame_equal(factor_returns_s, expected)
@pytest.mark.parametrize(
('factor_data', 'fwd_return_vals', 'alpha', 'beta'),
[(factor_data, [1, 2, 3, 4, 1, 1, 1, 1], -1, 5. / 6.)]
)
def test_factor_alpha_beta(factor_data, fwd_return_vals, alpha, beta):
factor_data = factor_data.copy()
factor_data['period_1'] = fwd_return_vals
ab = factor_alpha_beta(factor_data=factor_data)
expected = pd.DataFrame(columns=['period_1'],
index=['Ann. alpha', 'beta'],
data=[alpha, beta])
pd.testing.assert_frame_equal(ab, expected)
@pytest.mark.parametrize(
('factor_values', 'end_date', 'period', 'expected_vals'),
[([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'2015-1-4', 1,
[nan, 1.0, 1.0, 1.0]),
([[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0]],
'2015-1-4', 1,
[nan, -1.0, -1.0, -1.0]),
([[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0]],
'2015-1-12', 3,
[nan, nan, nan, 1.0, 1.0, 1.0, 0.6, -0.6, -1.0, 1.0, -0.6, -1.0])]
)
def test_factor_autocorrelation(factor_values,
end_date,
period,
expected_vals):
dr = pd.date_range(start='2015-1-1', end=end_date)
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = pd.DataFrame(index=dr,
columns=tickers,
data=factor_values).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor_df = pd.DataFrame()
factor_df['factor'] = factor
fa = factor_autocorrelation(factor_df, period)
expected = pd.Series(index=dr, data=expected_vals)
expected.name = period
pd.testing.assert_series_equal(fa, expected)
@pytest.mark.parametrize(
('before', 'after', 'demeaned', 'quantiles', 'expected_vals'),
[(1, 2, False, 4,
[[1.00, 0.0, -0.50, -0.75],
[0.0, 0.0, 0.0, 0.0],
[0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0],
[-0.20, 0.0, 0.25, 0.5625],
[0.0, 0.0, 0.0, 0.0],
[-0.3333333, 0.0, 0.50, 1.25],
[0.0, 0.0, 0.0, 0.0]]),
(1, 2, True, 4,
[[0.8833333, 0.0, -0.5625, -1.015625],
[0.0, 0.0, 0.0, 0.0],
[-0.1166667, 0.0, -0.0625, -0.265625],
[0.0, 0.0, 0.0, 0.0],
[-0.3166667, 0.0, 0.1875, 0.296875],
[0.0, 0.0, 0.0, 0.0],
[-0.4500000, 0.0, 0.4375, 0.984375],
[0.0, 0.0, 0.0, 0.0]]),
(3, 0, False, 4,
[[7.0, 3.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.488, -0.36, -0.2, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.703704, -0.55555555, -0.333333333, 0.0],
[0.0, 0.0, 0.0, 0.0]]),
(0, 3, True, 4,
[[0.0, -0.5625, -1.015625, -1.488281],
[0.0, 0.0, 0.0, 0.0],
[0.0, -0.0625, -0.265625, -0.613281],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.1875, 0.296875, 0.339844],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.4375, 0.984375, 1.761719],
[0.0, 0.0, 0.0, 0.0]]),
(3, 3, False, 2,
[[3.5, 1.5, 0.5, 0.0, -0.25, -0.375, -0.4375],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.595852, -0.457778, -0.266667, 0.0, 0.375, 0.90625, 1.664062],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
(3, 3, True, 2,
[[2.047926, 0.978888, 0.383333, 0.0, -0.3125, -0.640625, -1.050781],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-2.047926, -0.978888, -0.383333, 0.0, 0.3125, 0.640625, 1.050781],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])]
)
def test_average_cumulative_return_by_quantile(before, after,
demeaned, quantiles,
expected_vals):
dr = pd.date_range(start='2015-1-15', end='2015-2-1')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
r1, r2, r3, r4 = (1.25, 1.50, 1.00, 0.50)
data = [[r1**i, r2**i, r3**i, r4**i] for i in range(1, 19)]
prices = pd.DataFrame(index=dr, columns=tickers, data=data)
dr2 = | pd.date_range(start='2015-1-21', end='2015-1-26') | pandas.date_range |
import os
import sys
import yaml
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from gssl import DATA_DIR
from gssl.datasets import load_dataset
from gssl.transductive_model import Model
from gssl.transductive_model_arxiv import ArxivModel
from gssl.tasks import evaluate_node_classification
from gssl.utils import seed
def main():
seed()
# Read dataset name
dataset_name = sys.argv[1]
loss_name = sys.argv[2]
# Read params
with open(
"experiments_ssl/configs/hyperparameter_search_transductive.yaml",
"r"
) as fin:
params = yaml.safe_load(fin)[loss_name][dataset_name]
data, masks = load_dataset(name=dataset_name)
hps_file = os.path.join(
DATA_DIR, f"ssl/{loss_name}/{dataset_name}/hps.csv"
)
os.makedirs(os.path.dirname(hps_file), exist_ok=True)
# Make hyperparameter space
grid = [
(p_x, p_e)
for p_x in np.arange(
params["p_x"]["min"],
params["p_x"]["max"] + params["p_x"]["step"],
params["p_x"]["step"],
)
for p_e in np.arange(
params["p_e"]["min"],
params["p_e"]["max"] + params["p_e"]["step"],
params["p_e"]["step"],
)
]
# Which model to use
if dataset_name == "ogbn-arxiv":
model_cls = ArxivModel
else:
model_cls = Model
records = []
for p_x, p_e in tqdm(grid, desc="Grid search"):
model = model_cls(
feature_dim=data.x.size(-1),
emb_dim=params["emb_dim"],
loss_name=loss_name,
p_x=p_x,
p_e=p_e,
lr_base=params["lr_base"],
total_epochs=params["total_epochs"],
warmup_epochs=params["warmup_epochs"],
)
model.fit(data=data)
z = model.predict(data=data)
accuracy = evaluate_node_classification(
z=z, data=data, masks=masks[0],
use_pytorch=(dataset_name == "ogbn-arxiv"),
)
records.append({
"p_x": p_x,
"p_e": p_e,
"accuracy": accuracy,
})
(
| pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
import streamlit as st
import pandas as pd
def app():
st.title('Workshop')
st.write('This page will show the graphs and tables based on the Faculty Particpation in Workshops, STTP, FDP, etc')
data = st.file_uploader("Upload your relevant excel file")
df = pd.ExcelFile(data)
name = df.sheet_names
page = st.select_slider( '', options=["Department", "Faculty"])
if page == "Department":
time = ["Semester wise", "Year wise", "Cumulative years"]
temp = st.selectbox('Select the time period:', time)
if temp == "Semester wise":
for i in range(6):
df1 = pd.read_excel(df, sheet_name=name[i])
st.title(name[i])
st.write('**Table based on Criteria**')
st.table(df1.groupby('Criteria :=> Workshop / STTP / Training Program / FDP').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Criteria**')
u1 = df1['Criteria :=> Workshop / STTP / Training Program / FDP'].value_counts()
st.bar_chart(u1)
st.write('**Table based on Organizer**')
st.table(df1.groupby('Organizer').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Organizer**')
u1 = df1['Organizer'].value_counts()
st.bar_chart(u1)
st.write('**Table based on Level**')
st.table(df1.groupby('Local / State / National / International').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Level**')
u1 = df1['Local / State / National / International'].value_counts()
st.bar_chart(u1)
st.write('**Table based on Source of Funding**')
st.table(df1.groupby('Source of Funding').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Source of Funding**')
u1 = df1['Source of Funding'].value_counts()
st.bar_chart(u1)
elif temp == "Year wise":
for i in range(6,9):
df1 = pd.read_excel(df, sheet_name=name[i])
st.title(name[i])
st.write('**Table based on Criteria**')
st.table(df1.groupby('Criteria :=> Workshop / STTP / Training Program / FDP').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Criteria**')
u1 = df1['Criteria :=> Workshop / STTP / Training Program / FDP'].value_counts()
st.bar_chart(u1)
st.write('**Table based on Organizer**')
st.table(df1.groupby('Organizer').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Organizer**')
u1 = df1['Organizer'].value_counts()
st.bar_chart(u1)
st.write('**Table based on Level**')
st.table(df1.groupby('Local / State / National / International').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Level**')
u1 = df1['Local / State / National / International'].value_counts()
st.bar_chart(u1)
st.write('**Table based on Source of Funding**')
st.table(df1.groupby('Source of Funding').size().sort_values(ascending=True).reset_index(name='No. of Faculties participated in Workshop'))
st.write('**Bar Graph based on Source of Funding**')
u1 = df1['Source of Funding'].value_counts()
st.bar_chart(u1)
else:
df1 = | pd.read_excel(df, sheet_name=name[9]) | pandas.read_excel |
import pandas as pd
from Data_BCG.Download_Data import scraping_Functions as sf
# performance = sf.get_aggregated_season_data(1980)
per_game_data = sf.get_game_data(2009)
birthplaces = sf.get_birthplaces()
high_schools = sf.get_high_school_cities()
player_id = sf.get_players_id()
# Standarizing each database
# I removed the * at the end of the name of the players from every player
# performance.player = performance.player.str.replace("\*", "")
player_id = player_id.rename(str.lower, axis="columns")
per_game_data = pd.merge(per_game_data, player_id, how="left", on="id")
names = per_game_data["player"]
per_game_data.drop("player", axis=1, inplace=True)
per_game_data.insert(0, "player", names)
# Eliminated columns that weren't useful to the final database
high_schools = high_schools.iloc[:, [0, 1, 2]].rename(str.lower, axis="columns")
high_schools.player = high_schools.player.str.replace("\*", "")
# Restricted the sample to players that were born in the US
# Eliminated columns that weren't useful to the final database
birthplaces.player = birthplaces.player.str.replace("\*", "")
birthplaces = birthplaces[birthplaces.country_iso2 == "US"]
birthplaces = birthplaces.iloc[:, [0, 1, 3]]
# Merged the databases into one, restricting the data with "inner"
# performance_aggr = pd.merge(performance, birthplaces, how="inner", on="player")
# performance_aggr = pd.merge(performance_aggr, high_schools, how="inner", on=["player", "state"])
per_game_aggr = pd.merge(per_game_data, birthplaces, how="inner", on="player")
per_game_aggr = | pd.merge(per_game_aggr, high_schools, how="inner", on=["player", "state"]) | pandas.merge |
"""
.. _logs:
Log File Analysis (experimental)
================================
Logs contain very detailed information about events happening on computers.
And the extra details that they provide, come with additional complexity that
we need to handle ourselves. A pageview may contain many log lines, and a
session can consist of several pageviews for example.
Another important characterisitic of log files is that their are usualy not
big.
They are massive.
So, we also need to cater for their large size, as well as rapid changes.
TL;DR
>>> import advertools as adv
>>> import pandas as pd
>>> adv.logs_to_df(log_file='access.log',
... output_file='access_logs.parquet',
... errors_file='log_errors.csv',
... log_format='common',
... fields=None)
>>> logs_df = pd.read_parquet('access_logs.parquet')
How to run the :func:`logs_to_df` function:
-------------------------------------------
* ``log_file``: The path to the log file you are trying to analyze.
* ``output_file``: The path to where you want the parsed and compressed file
to be saved. Only the `parquet` format is supported.
* ``errors_file``: You will almost certainly have log lines that don't conform
to the format that you have, so all lines that weren't properly parsed would
go to this file. This file also contains the error messages, so you know what
went wrong, and how you might fix it. In some cases, you might simply take
these "errors" and parse them again. They might not be really errors, but
lines in a different format, or temporary debug messages.
* ``log_format``: The format in which your logs were formatted. Logs can (and
are) formatted in many ways, and there is no right or wrong way. However,
there are defaults, and a few popular formats that most servers use. It is
likely that your file is in one of the popular formats. This parameter can
take any one of the pre-defined formats, for example "common", or "extended",
or a regular expression that you provide. This means that you can parse any
log format (as long as lines are single lines, and not formatted in JSON).
* ``fields``: If you selected one of the supported formats, then there is no
need to provide a value for this parameter. You have to provide a list of
fields in case you provide a custom (regex) format. The fields will become
the names of the columns of the resulting DataFrame, so you can distinguish
between them (client, time, status code, response size, etc.)
Supported Log Formats
---------------------
* `common`
* `combined` (a.k.a "extended")
* `common_with_vhost`
* `nginx_error`
* `apache_error`
Parse and Analyze Crawl Logs in a Dataframe
===========================================
While crawling with the :func:`crawl` function, the process produces logs for
every page crawled, scraped, redirected, and even blocked by robots.txt rules.
By default, those logs are can be seen on the command line as their default
destination is stdout.
A good practice is to set a ``LOG_FILE`` so you can save those logs to a text
file, and review them later. There are several reasons why you might want to do
that:
* Blocked URLs: The crawler obeys robots.txt rules by default, and when it
encounters pages that it shouldn't crawl, it doesn't. However, this is logged
as an event, and you can easily extract a list of blocked URLs from the logs.
* Crawl errors: You might also get some errors while crawling, and it can be
interesting to know which URLs generated errors.
* Filtered pages: Those are pages that were discovered but weren't crawled
because they are not a sub-domain of the provided url_list, or happen to be
on external domains altogether.
This can simply be done by specifying a file name through the optional
`custom_settings` parameter of ``crawl``:
>>> import advertools as adv
>>> adv.crawl('https://example.com',
output_file='example.jl',
follow_links=True,
custom_settings={'LOG_FILE': 'example.log'})
If you run it this way, all logs will be saved to the file you chose,
`example.log` in this case.
Now, you can use the :func:`crawllogs_to_df` function to open the logs in a
DataFrame:
>>> import advertools as adv
>>> logs_df = adv.crawllogs_to_df('example.log')
The DataFrame might contain the following columns:
* `time`: The timestamp for the process
* `middleware`: The middleware responsible for this process, whether it is the
core engine, the scraper, error handler and so on.
* `level`: The logging level (DEBUG, INFO, etc.)
* `message`: A single word summarizing what this row represents, "Crawled",
"Scraped", "Filtered", and so on.
* `domain`: The domain name of filtered (not crawled pages) typically for URLs
outside the current website.
* `method`: The HTTP method used in this process (GET, PUT, etc.)
* `url`: The URL currently under process.
* `status`: HTTP status code, 200, 404, etc.
* `referer`: The referring URL, where applicable.
* `method_to`: In redirect rows the HTTP method used to crawl the URL going to.
* `redirect_to`: The URL redirected to.
* `method_from`: In redirect rows the HTTP method used to crawl the URL coming
from.
* `redirect_from`: The URL redirected from.
* `blocked_urls`: The URLs that were not crawled due to robots.txt rules.
"""
import os
import re
from pathlib import Path
from tempfile import TemporaryDirectory
import pandas as pd
LOG_FORMATS = {
'common': r'^(?P<client>\S+) \S+ (?P<userid>\S+) \[(?P<datetime>[^\]]+)\] "(?P<method>[A-Z]+) (?P<request>[^ "]+)? HTTP/[0-9.]+" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-)$',
'combined': r'^(?P<client>\S+) \S+ (?P<userid>\S+) \[(?P<datetime>[^\]]+)\] "(?P<method>[A-Z]+) (?P<request>[^ "]+)? HTTP/[0-9.]+" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-) "(?P<referrer>[^"]*)" "(?P<useragent>[^"]*)"$',
'common_with_vhost': r'^(?P<vhost>\S+) (?P<client>\S+) \S+ (?P<userid>\S+) \[(?P<datetime>[^\]]+)\] "(?P<method>[A-Z]+) (?P<request>[^ "]+)? HTTP/[0-9.]+" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-)$',
'nginx_error': r'^(?P<datetime>\d{4}/\d\d/\d\d \d\d:\d\d:\d\d) \[(?P<level>[^\]]+)\] (?P<pid>\d+)#(?P<tid>\d+): (?P<counter>\*\d+ | )?(?P<message>.*)',
'apache_error': r'^(?P<datetime>\[[^\]]+\]) (?P<level>\[[^\]]+\]) \[pid (?P<pid>\d+)\] (?P<file>\S+):(?P<status> \S+| ):? \[client (?P<client>\S+)\] (?P<message>.*)',
}
LOG_FIELDS = {
'common': ['client', 'userid', 'datetime', 'method', 'request', 'status',
'size'],
'combined': ['client', 'userid', 'datetime', 'method', 'request', 'status',
'size', 'referer', 'user_agent'],
'common_with_vhost': ['virtual_host', 'client', 'userid', 'datetime',
'method', 'request', 'status', 'size'],
'nginx_error': ['datetime', 'level', 'process_id', 'thread_id', 'counter',
'message'],
'apache_error': ['datetime', 'level', 'process_id', 'file', 'status',
'client', 'message'],
}
def logs_to_df(log_file, output_file, errors_file, log_format, fields=None):
"""Parse and compress any log file into a DataFrame format.
Convert a log file to a `parquet` file in a DataFrame format, and save all
errors (or lines not conformig to the chosen log format) into a separate
``errors_file`` text file. Any non-JSON log format is possible, provided
you have the right regex for it. A few default ones are provided and can be
used. Check out ``adv.LOG_FORMATS`` and ``adv.LOG_FIELDS`` for the
available formats and fields.
>>> import advertools as adv
>>> import pandas as pd
>>> adv.logs_to_df(log_file='access.log',
... output_file='access_logs.parquet',
... errors_file='log_errors.csv',
... log_format='common',
... fields=None)
>>> logs_df = pd.read_parquet('access_logs.parquet')
You can now analyze ``logs_df`` as a normal pandas DataFrame.
:param str log_file: The path to the log file.
:param str output_file: The path to the desired output file. Must have a
".parquet" extension, and must not have the same
path as an existing file.
:param str errors_file: The path where the parsing errors are stored. Any
text format works, CSV is recommended to easily
open it with any CSV reader with the separator as
"@@".
:param str log_format: Either the name of one of the supported log formats,
or a regex of your own format.
:param str fields: A list of fields, which will become the names of columns
in ``output_file``. Only required if you provide a
custom (regex) ``log_format``.
"""
if not output_file.endswith('.parquet'):
raise ValueError("Please provide an `output_file` with a `.parquet` "
"extension.")
for file in [output_file, errors_file]:
if os.path.exists(file):
raise ValueError(f"The file '{file}' already exists. "
"Please rename it, delete it, or choose another "
"file name/path.")
regex = LOG_FORMATS.get(log_format) or log_format
columns = fields or LOG_FIELDS[log_format]
with TemporaryDirectory() as tempdir:
tempdir_name = Path(tempdir)
with open(log_file) as source_file:
linenumber = 0
parsed_lines = []
for line in source_file:
linenumber += 1
try:
log_line = re.findall(regex, line)[0]
parsed_lines.append(log_line)
except Exception as e:
with open(errors_file, 'at') as err:
err_line = line[:-1] if line.endswith('\n') else line
print('@@'.join([str(linenumber), err_line, str(e)]),
file=err)
pass
if linenumber % 250_000 == 0:
print(f'Parsed {linenumber:>15,} lines.', end='\r')
df = pd.DataFrame(parsed_lines, columns=columns)
df.to_parquet(tempdir_name / f'file_{linenumber}.parquet')
parsed_lines.clear()
else:
print(f'Parsed {linenumber:>15,} lines.', end='\r')
df = | pd.DataFrame(parsed_lines, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures, StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, accuracy_score, classification_report, f1_score, roc_auc_score, roc_curve, plot_roc_curve, confusion_matrix
from sklearn.model_selection import GridSearchCV, learning_curve, train_test_split
from sklearn.pipeline import make_pipeline, FeatureUnion
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.preprocessing import FunctionTransformer
from sklearn.compose import make_column_transformer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier, RandomForestClassifier, BaggingClassifier
from EDA_utils import make_column_transformerz, columnseparater, standardscaler, categorical_data, DFFeatureUnion
import joblib
import streamlit as st
import pickle
import SessionState
from PIL import Image
import plotly.express as px
try:
import streamlit.ReportThread as ReportThread
from streamlit.server.Server import Server
except Exception:
# Streamlit >= 0.65.0
import streamlit.report_thread as ReportThread
from streamlit.server.server import Server
def local_css(file_name):
with open(file_name) as f:
st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
def prediction(X):
output = []
joblib_file_hard = "Voting_hard_classifier_model.pkl"
joblib_file_soft = "Voting_soft_classifier_model.pkl"
joblib_file_rf = "random_forest_classifier_model.pkl"
vc_hard = joblib.load(joblib_file_hard)
vc_soft = joblib.load(joblib_file_soft)
rf = joblib.load(joblib_file_rf)
output.append(vc_hard.predict(X))
output.append(vc_soft.predict(X))
output.append(rf.predict(X))
unique, counts = np.unique(np.asarray(output), return_counts=True)
return output, unique, counts
def visualize_titanic_pre(df):
df['Family_group'] = pd.cut(
df.SibSp, bins=[-1, 2, 4, 10], labels=['Small', 'Medium', 'Large'])
df['Parent_Count'] = pd.cut(
df.Parch, bins=[-1, 0.5, 1.5, 10], labels=['No_parent', 'single_parent', 'big_family'])
df.drop(['PassengerId', 'Cabin', 'Ticket'], axis=1, inplace=True)
for i in range(1, 4):
df.Fare[df.Pclass.eq(i)] = df.Fare[df.Pclass.eq(i)].replace(
0, df.Fare[df.Pclass.eq(i)].mean())
df.Age = df.Age.replace(np.NaN, 0)
age_mean = np.mean(df.Age)
df.Age = (df.Age.replace(0, age_mean))
df.dropna(inplace=True)
df.Survived = df.Survived.replace(1, 'Survived')
df.Survived = df.Survived.replace(0, 'Not Survived')
return df
def titanic_pre(df):
df['Family_group'] = pd.cut(
df.SibSp, bins=[-1, 2, 4, 10], labels=['Small', 'Medium', 'Large']).astype('object')
df['Parent_Count'] = pd.cut(
df.Parch, bins=[-1, 0.5, 1.5, 10], labels=['No_parent', 'single_parent', 'big_family']).astype('object')
return df
#Pipeline Section
num = ['Age', 'Fare']
cat = ['Pclass', 'Name', 'Embarked', 'Family_group', 'Parent_Count']
pipeline = make_pipeline(
make_pipeline(
make_column_transformerz(SimpleImputer(
strategy='most_frequent'), ['Embarked']),
make_column_transformerz(SimpleImputer(
strategy='most_frequent'), ['Fare']),
make_column_transformerz(SimpleImputer(
strategy='most_frequent'), ['Age']),
make_column_transformerz(FunctionTransformer(np.log), ['Fare']),
),
DFFeatureUnion([
make_pipeline(
columnseparater(num),
standardscaler()
),
make_pipeline(
columnseparater(cat),
categorical_data()
)
])
)
filename = "train_dataset.pkl"
train_data = pickle.load(open(filename, 'rb'))
train_data = pipeline.fit_transform(train_data)
st.set_page_config(layout="wide")
theme = st.beta_columns(8)
agree = theme[7].checkbox('Dark Theme')
if agree:
st.markdown(
f"""
<style>
.reportview-container {{
background-color: #000000;
background-image: none;
color: #ffffff
}}
</style>
""",
unsafe_allow_html=True
)
st.markdown("""<style>
.css-1aumxhk {
background-color: #000000;
background-image: none;
color: #ffffff
}
</style>""", unsafe_allow_html=True)
dataset = 'Titanic_Complete_Dataset.csv'
df = visualize_titanic_pre(pd.read_csv(dataset))
inp = st.sidebar.selectbox('', ['Titanic Prediction', 'Titanic Visualization'])
if inp == 'Titanic Visualization':
local_css("style.css")
state = SessionState.get(flag=False)
st.sidebar.markdown('''<h1 style="color:#ffffff"> Welcome to Titanic Prediction Tool</h1>''', unsafe_allow_html=True)
st.sidebar.markdown('''<h3 style="color:#ce222f"> <b> <u>
Below are the creator details: </u></b></h3>''', unsafe_allow_html=True)
st.sidebar.markdown('''#### Name : <NAME> (<<EMAIL>>)\n
#### LinkedIn : <https://www.linkedin.com/in/john-pravin/>
#### GitHub : <https://github.com/JohnPravin97> ''')
st.sidebar.markdown('''<h3 style="color:#ce222f"> <u> Special thanks to </u> </h3>''', unsafe_allow_html=True)
st.sidebar.markdown('''
#### Name : <NAME> (<https://www.linkedin.com/in/aakash-nagarajan-28325510a/>)''')
#Main Coding
st.markdown('''<div align="center"> <h1> <b> Welcome to Titanic Visualization Tool </b> </h1> </div>''', unsafe_allow_html=True)
img = st.beta_columns(3)
img[1].image(Image.open('Titanic_Visualization.jpg'), width=425, caption='Titanic')
st.markdown('''
<h3 style="color:#ce222f"> <u> <b> INTRODUCTION: </b> </h3> </u>''', unsafe_allow_html=True)
st.markdown('''This is a visual representation of the original titanic dataset, the plot represents the details of entries in the dataset. The details about attributes of these entries can be found in the visual representation.\n
''')
st.markdown('''
<h3 style="color:#ce222f"> <b> <u> USER GUIDES: </b> </h3> </u>''', unsafe_allow_html=True)
st.markdown(''' Points to consider while working with this tool: \n
1. User can press me 'Visualize Me' button with the default selections to visualize the dataset.
2. Plotly library is used to make the visualization of original titanic dataset.
3. Thus, plotly features are available on the top right corner of the plot.
4. The Animations and Size columns has two options: 1. Pclass, 2. Sex and 1. SibSp, 2.Parch respectively.
5. User has to switch back to 'Titanic Prediction' for description of the columns.
6. User has to switch back to 'Titanic Prediction' for providing feedback and comments.
''')
st.markdown('''
<h3 style="color:#ce222f"> <u> <b> LETS GET STARTED: </b> </h3> </u>''', unsafe_allow_html=True)
cols = st.beta_columns(6)
x_axis = cols[0].selectbox('X-axis', ['Age'])
y_axis = cols[1].selectbox('Y_axis', ['Fare'])
colors = cols[2].selectbox('Colors', ['Survived'])
columns = cols[3].selectbox('Facet-Cols', ['Sex', 'Pclass'])
size = cols[5].selectbox('Size', ['Parch', 'SibSp'])
hover_name = 'Name'
hover_data = ['SibSp', 'Parch', 'Embarked']
if columns == 'Pclass':
animation = cols[4].selectbox('Animations', ['Sex'])
elif columns =='Sex':
animation = cols[4].selectbox('Animations', ['Pclass'])
if cols[3].button('Visualize Me'):
st.markdown('***Please wait while it is loading and click "Autoscale" in plotly to visualize it efficiently***')
st.plotly_chart(px.scatter(df, x=x_axis, y=y_axis, title='Titanic Visualization',size=size, color = colors, facet_col = columns, hover_name=hover_name, hover_data=hover_data, animation_frame=animation, height=600, width=1000))
else:
local_css("style.css")
state = SessionState.get(flag=False)
st.sidebar.markdown('''<h1 style="color:#ffffff"> Welcome to Titanic Prediction Tool</h1>''', unsafe_allow_html=True)
st.sidebar.markdown('''<h3 style="color:#ce222f"> <b> <u>
Below are the creator details: </u></b></h3>''', unsafe_allow_html=True)
st.sidebar.markdown('''#### Name : <NAME> (<<EMAIL>>)\n
#### LinkedIn : <https://www.linkedin.com/in/john-pravin/>
#### GitHub : <https://github.com/JohnPravin97> ''')
st.sidebar.markdown('''<h3 style="color:#ce222f"> <u> Special thanks to </u> </h3>''', unsafe_allow_html=True)
st.sidebar.markdown('''
#### Name : <NAME> (<https://www.linkedin.com/in/aakash-nagarajan-28325510a/>)''')
feedback_save = pd.HDFStore('feedback.h5')
comment_save = pd.HDFStore('comment.h5')
st.sidebar.markdown('''<h3 style="color:#ce222f"> <u>Feedback: </u></h3>''', unsafe_allow_html=True)
Name = st.sidebar.text_input('Please Enter Your Name*')
feedlist = ['Better', 'Normal', 'Worst']
feedback = st.sidebar.radio('Please provide your feedback below*', feedlist)
feedback_comment = st.sidebar.text_input('Additional Comments')
st.sidebar.markdown('''<p style="font-size:60%;"> * mandatory fields </p>''', unsafe_allow_html=True)
if st.sidebar.button('Send Feedback'):
if (not Name):
st.sidebar.markdown('''<p style="font-size:80%;"> <b> Please provide a name to save your feedback </b></p>''', unsafe_allow_html=True)
else:
feedback_df = pd.DataFrame(feedback, index=[0], columns=['Feedback'])
feedback_df['Name']=Name.upper()
feedback_save.put(Name.upper(), feedback_df)
feedback_save.close()
if (not feedback_comment):
pass
else:
comment_df = pd.DataFrame(feedback_comment, index=[0], columns=['Comments'])
comment_df['Name']=Name.upper()
comment_save.put(Name.upper(), comment_df)
comment_save.close()
st.sidebar.write('Thanks for your feedback')
st.sidebar.markdown('''<h3 style="color:#ce222f"><u> Disclaimer: </u> </h3>''', unsafe_allow_html=True)
st.sidebar.write('Input name and data are being stored for further improvements of the tool')
# Main Coding
st.markdown('''<div align="center"> <h1 style="color:#ffffff">Welcome to Titanic Survival Prediction Tool </b> </h1> </div>''', unsafe_allow_html=True)
img = st.beta_columns(3)
img[1].image(Image.open('Titanic.jpg'), width=425, caption='Titanic')
st.markdown('''
<h3 style="color:#ce222f"><u> <b> INTRODUCTION: </b> </h3> </u>''', unsafe_allow_html=True)
st.markdown('''<p style="color:#ffffff">Ever had a dream of travelling on the titanic? ever wondered if you would have survived the unfortunate incident if you got a ticket aboard the dreamy cruise for a journey in the year 1912? We got you covered:\n
<p style="color:#ffffff">This is a prediction system which can be used to predict the chance of a person surviving the titanic incident. The system asks the user input for data and predicts the chance of survival of the user in the titanic incident. This model is trained using the original titanic data set with basic machine learning techniques like regression, and to increase the accuracy of prediction it was later combined with advanced machine learning techniques such as the ensemble learning and integrated all of these into an end to end pipeline architecture.\n
<p style="color:#ffffff"> The following are the data one has to provide : Pclass, Name, Sex, Age, Fare, SibSp, Parch, Embarked. Refer the below describtion for more detailed explanation on each parameters </p>''', unsafe_allow_html=True)
markdwn = st.beta_columns(3)
markdw = st.beta_columns(2)
markdwn[0].markdown('''
<h3 style="color:#ce222f"> <u> <b> VARIABLE DESCRIPTIONS: </b> </h3> </u>''', unsafe_allow_html=True)
markdw[0].markdown(''' <div style="color:#ffffff">
\t <b> Pclass </b> -> Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)\n <div style="color:#ffffff">
<b>Name </b> -> Name of the passenger\n <div style="color:#ffffff">
<b>Sex </b> -> Sex\n <div style="color:#ffffff">
<b>Age </b> -> Age\n </div> <div style="color:#ffffff">
''', unsafe_allow_html=True)
markdw[1].markdown('''<div style="color:#ffffff">
<b>Sibsp</b> -> Number of Siblings/Spouses Aboard\n <div style="color:#ffffff">
<b>Parch</b> -> Number of Parents/Children Aboard\n <div style="color:#ffffff">
<b>Fare</b> -> Passenger Fare (British pound)\n <div style="color:#ffffff">
<b>Embarked</b> -> Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)\n
''', unsafe_allow_html=True)
st.markdown('''
<h3 style="color:#ce222f"> <u> <b> USER GUIDES: </b> </h3> </u>''', unsafe_allow_html=True)
st.markdown(''' Points to consider while working with this tool: \n
1. User has to fill the Pclass and press 'Enter' button to get the remaining parameters.
2. User has to fill his first name to proceed further with the prediction.
3. User has to follow and provide the values between the range mentioned for Age and Fare.
4. Upon filling all the required parameters, press 'Predict' button to proceed.
5. After filling all the parameters, the users are provided with dataframe containing their inputs for reference.
6. Sex parameter is automatically taken based on the Title given.
7. Family Group and Parent Count are calculated based on the SibSp and Parch parameters.
8. Press 'Refresh' button to try with new data.
9. Select 'Titanic Visualization' box to visualize the original titanic dataset.
10. Provide feedback and comments from sidebar for further improving the tool.
11. Please note that same name with two predictions are not allowed. User need to change the name and data for predicting again.
''')
st.markdown('''
<h3 style="color:#ce222f"><u> <b> LETS GET STARTED: </b> </h3> </u>''', unsafe_allow_html=True)
initial_cols = st.beta_columns(3)
initial_lis = {'Pclass': ['<select>',1,2,3]}
Pclass = (initial_cols[1].selectbox('Pclass', initial_lis['Pclass']))
if (Pclass=='<select>'):
initial_cols[1].write('Please select the Pclass to Proceed')
state.flag=False
else:
if (initial_cols[1].button('Enter') or state.flag ==True):
Pclass = int(Pclass)
if Pclass==3:
st.markdown('''<div align="center" style="color:#ffffff"> <h4> You have selected Pclass as 3 and provide information below details to predict your titanic survival fate<br> </br> </h4> </div>''', unsafe_allow_html=True )
name_cols = st.beta_columns(5)
cols = st.beta_columns(5)
lis = {
'Name':['Mr.', 'Miss.', 'Mrs.', 'Master.'],
'SibSp':[0,1,2,3,4,5,8],
'Parch':[0,1,2,3,4,5,6],
'Embarked':['S', 'C', 'Q']
}
Name = name_cols[1].selectbox('Pronoun', lis['Name'])
First_Name = name_cols[2].text_input('Please Enter Your First Name')
Last_Name = name_cols[3].text_input('Please Enter Your last Name')
#To select age range from the Name
if Name == 'Master.':
min_age_value=0
max_age_value=12
elif Name=='Mr.':
min_age_value=15
max_age_value=74
elif Name=='Miss.':
min_age_value=0
max_age_value=45
elif Name=='Mrs.':
min_age_value=15
max_age_value=65
Age = cols[0].text_input('Enter the Age ' + str(min_age_value) + ' to ' + str(max_age_value))
SibSp = int(cols[1].selectbox('SibSp', lis['SibSp']))
Parch = int(cols[2].selectbox('Parch', lis['Parch']))
Fare = cols[3].text_input('Enter the Fare btw 4-70')
Embarked = cols[4].selectbox('Embarked', lis['Embarked'])
name_temp = str(First_Name).upper()
#To select the sex of the person based on the Name
if Name in ['Mr.', 'Master.']:
Sex = 'Male'
else:
Sex = 'Female'
state.flag=True
if (not First_Name):
st.write('Provide first name to proceed')
else:
if (cols[0].button('Predict')):
dataframe_save = pd.HDFStore('titanic_search_dataframe.h5')
try:
dic = {'Pclass': Pclass, 'Pronoun': Name, 'Sex': Sex, 'Age': Age, 'SibSp': SibSp, 'Parch': Parch, 'Fare': Fare, 'Embarked': Embarked}
X = pd.DataFrame(dic, index=[0])
X.Age = X.Age.astype('int64')
X.Fare = X.Fare.astype('int64')
if X.Age.values > max_age_value or X.Age.values < min_age_value or X.Fare.values > 70 or X.Fare.values < 4:
st.write('Please provide Age between ' + str(min_age_value) + ' to ' + str(max_age_value) +', Fare value between 4-70 and Select Predict to continue')
else:
if ('/' + name_temp) in dataframe_save.keys():
st.markdown('**Prediction for this data is already done. Please change the data and first name to continue**')
df = dataframe_save.get(name_temp)
st.dataframe(df)
else:
st.markdown(' <h4 style="color:#ce222f"> <b> Input Dataframe for reference </b> <h4>', unsafe_allow_html=True)
temp = titanic_pre(X)
st.dataframe(temp)
x_test = pipeline.transform(X)
output, unique, counts = prediction(x_test)
temp_2 = unique[counts.argmax()]
temp['Survived'] = temp_2
dataframe_save.put(name_temp, temp, data_columns=True)
dataframe_save.close()
if unique[counts.argmax()] == 1:
st.markdown(' <h4 style="color:#ce222f"> <b> RESULT: You would have Survived based on the input data </b> <h4>', unsafe_allow_html=True)
st.write(' Please refer the below dataframe for each model outputs: **0 -> Not Survived, 1 -> Survived** ', unsafe_allow_html=True)
st.dataframe(pd.DataFrame(output, index=['Voting_Classifier_Hard', 'Voting_Classifier_Soft', 'RandomForest'], columns=['Output']))
if st.button('Refresh'):
state=False
elif unique[counts.argmax()] ==0:
st.write('<h4 style="color:#ce222f"> <b> RESULT: Sorry to Say, you would have not survived based on the input data </b> <h4>', unsafe_allow_html=True)
st.write('Please refer the below dataframe for each model outputs: **0 -> Not Survived, 1 -> Survived** ', unsafe_allow_html=True)
st.dataframe(pd.DataFrame(output, index=['Voting_Classifier_Hard', 'Voting_Classifier_Soft', 'RandomForest'], columns=['Output']))
st.write('**Thanks for using the app**. Please press **"Refresh" button** to continue with new prediction. Also, please provide **feedback from sidebar options** once done')
if st.button('Refresh'):
state=False
else:
st.write('invaild output - please start from beginning')
except:
st.write('Please enter ' + str(min_age_value) + ' to ' + str(max_age_value) +' in Age text box and between 4-70 in Fare text box. Please don\'t provide any string values')
elif Pclass==2:
st.markdown('''<div align="center"> <h4> You have selected Pclass as 2 and provide information below details to predict your titanic survival fate<br> </br> </h4> </div>''', unsafe_allow_html=True )
name_cols = st.beta_columns(5)
cols = st.beta_columns(5)
lis = {
'Name':['Mr.', 'Mrs.', 'Miss.', 'Master.', 'Rev.', 'Dr.', 'Ms.'],
'SibSp':[0,1,2,3],
'Parch':[0,1,2,3],
'Embarked':['S', 'C', 'Q']
}
Name = name_cols[1].selectbox('Pronoun', lis['Name'])
First_Name = name_cols[2].text_input('Please Enter Your First Name')
Last_Name = name_cols[3].text_input('Please Enter Your last Name')
name_temp = str(First_Name).upper()
#To select age range from the Name
if Name == 'Master.':
min_age_value=0
max_age_value=8
elif Name=='Mr.':
min_age_value=16
max_age_value=70
elif Name=='Miss.':
min_age_value=2
max_age_value=50
elif Name=='Mrs.':
min_age_value=14
max_age_value=55
elif Name=='Rev.':
min_age_value=27
max_age_value=57
elif Name=='Dr.':
min_age_value=23
max_age_value=54
elif Name=='Ms.':
min_age_value=20
max_age_value=30
Age = cols[0].text_input('Enter the Age ' + str(min_age_value) + ' to ' + str(max_age_value))
SibSp = int(cols[1].selectbox('SibSp', lis['SibSp']))
Parch = int(cols[2].selectbox('Parch', lis['Parch']))
Fare = cols[3].text_input('Enter the Fare btw 10-75')
Embarked = cols[4].selectbox('Embarked', lis['Embarked'])
#To select the sex of the person based on the Name
if Name in ['Mr.', 'Master.', 'Rev.', 'Dr.', 'Capt.', 'Col.', 'Major.', 'Don.']:
Sex = 'Male'
else:
Sex = 'Female'
state.flag=True
if (not First_Name):
st.write('Provide first name to proceed')
else:
if (cols[0].button('Predict')):
dataframe_save = pd.HDFStore('titanic_search_dataframe.h5')
try:
dic = {'Pclass': Pclass, 'Pronoun': Name, 'Sex': Sex, 'Age': Age, 'SibSp': SibSp, 'Parch': Parch, 'Fare': Fare, 'Embarked': Embarked}
X = pd.DataFrame(dic, index=[0])
X.Age = X.Age.astype('int64')
X.Fare = X.Fare.astype('int64')
if X.Age.values > max_age_value or X.Age.values < min_age_value or X.Fare.values > 75 or X.Fare.values < 10:
st.write('Please provide Age between ' + str(min_age_value) + ' to ' + str(max_age_value) +', Fare value between 10-75 and Select Predict to continue')
else:
if ('/' + name_temp) in dataframe_save.keys():
st.markdown('**Prediction for this data is already done. Please change the data and first name to continue**')
df = dataframe_save.get(name_temp)
st.dataframe(df)
else:
st.markdown('<h4 style="color:#ce222f"> <b> Input Dataframe for reference </b> <h4>', unsafe_allow_html=True)
temp = titanic_pre(X)
st.dataframe(temp)
x_test = pipeline.transform(X)
output, unique, counts = prediction(x_test)
temp_2 = unique[counts.argmax()]
temp['Survived'] = temp_2
dataframe_save.put(name_temp, temp, data_columns=True)
dataframe_save.close()
if unique[counts.argmax()] == 1:
st.markdown(' <h4 style="color:#ce222f"> <b> RESULT: You would have Survived based on the input data </b> <h4>', unsafe_allow_html=True)
st.write(' Please refer the below dataframe for each model outputs: **0 -> Not Survived, 1 -> Survived** ', unsafe_allow_html=True)
st.dataframe(pd.DataFrame(output, index=['Voting_Classifier_Hard', 'Voting_Classifier_Soft', 'RandomForest'], columns=['Output']))
st.write('**Thanks for using the app**. Please press **"Refresh" button** to continue with new prediction. Also, please provide **feedback from sidebar options** once done')
if st.button('Refresh'):
state=False
elif unique[counts.argmax()] ==0:
st.write('<h4 style="color:#ce222f"> <b> RESULT: Sorry to Say, you would have not survived based on the input data </b> <h4>', unsafe_allow_html=True)
st.write('Please refer the below dataframe for each model outputs: **0 -> Not Survived, 1 -> Survived** ', unsafe_allow_html=True)
st.dataframe(pd.DataFrame(output, index=['Voting_Classifier_Hard', 'Voting_Classifier_Soft', 'RandomForest'], columns=['Output']))
st.write('**Thanks for using the app**. Please press **"Refresh" button** to continue with new prediction. Also, please provide **feedback from sidebar options** once done')
if st.button('Refresh'):
state=False
else:
st.write('invaild output - please start from beginning')
except:
st.write('Please enter ' + str(min_age_value) + ' to ' + str(max_age_value) +' in Age text box and between 10-75 in Fare text box. Please don\'t provide any string values')
elif Pclass==1:
st.markdown('''<div align="center"> <h4> You have selected Pclass as 1 and provide information below details to predict your titanic survival fate<br> </br> </h4> </div>''', unsafe_allow_html=True)
name_cols = st.beta_columns(5)
cols = st.beta_columns(5)
lis = {
'Name':['Mr.', 'Miss.', 'Mrs.', 'Dr.', 'Master.', 'Mlle.', 'Col.', 'Major.', 'Capt.', 'Don.'],
'Sex':['Male', 'Female'],
'SibSp':[0,1,2,3],
'Parch':[0,1,2,4],
'Embarked':['S', 'C', 'Q']
}
Name = name_cols[1].selectbox('Pronoun', lis['Name'])
First_Name = name_cols[2].text_input('Please Enter Your First Name')
Last_Name = name_cols[3].text_input('Please Enter Your last Name')
name_temp = str(First_Name).upper()
#To select age range from the Name
if Name == 'Master.':
min_age_value=0
max_age_value=11
elif Name=='Mr.':
min_age_value=17
max_age_value=80
elif Name=='Miss.':
min_age_value=2
max_age_value=60
elif Name=='Mrs.':
min_age_value=17
max_age_value=60
elif Name=='Rev.':
min_age_value=27
max_age_value=57
elif Name=='Dr.':
min_age_value=32
max_age_value=50
elif Name=='Ms.':
min_age_value=20
max_age_value=30
elif Name=='Mlle.':
min_age_value=20
max_age_value=30
elif Name=='Col.':
min_age_value=55
max_age_value=60
elif Name=='Major.':
min_age_value=45
max_age_value=50
elif Name=='Capt.':
min_age_value=65
max_age_value=75
elif Name=='Don.':
min_age_value=40
max_age_value=50
Age = cols[0].text_input('Enter the Age ' + str(min_age_value) + ' to ' + str(max_age_value))
SibSp = int(cols[1].selectbox('SibSp', lis['SibSp']))
Parch = int(cols[2].selectbox('Parch', lis['Parch']))
Fare = cols[3].text_input('Enter the Fare btw 5-500')
Embarked = cols[4].selectbox('Embarked', lis['Embarked'])
#To select the sex of the person based on the Name
if Name in ['Mr.', 'Master.', 'Rev.', 'Dr.']:
Sex = 'Male'
else:
Sex = 'Female'
state.flag=True
if (not First_Name):
st.write('Provide first name to proceed')
else:
if (cols[0].button('Predict')):
dataframe_save = pd.HDFStore('titanic_search_dataframe.h5')
try:
dic = {'Pclass': Pclass, 'Pronoun': Name, 'Sex': Sex, 'Age': Age, 'SibSp': SibSp, 'Parch': Parch, 'Fare': Fare, 'Embarked': Embarked}
X = pd.DataFrame(dic, index=[0])
X.Age = X.Age.astype('int64')
X.Fare = X.Fare.astype('int64')
if X.Age.values > max_age_value or X.Age.values < min_age_value or X.Fare.values > 500 or X.Fare.values < 5:
st.write('Please provide Age between ' + str(min_age_value) + ' to ' + str(max_age_value) +', Fare value between 5-500 and Select Predict to continue')
else:
if ('/' + name_temp) in dataframe_save.keys():
st.markdown('**Prediction for this data is already done. Please change the data and first name to continue**')
df = dataframe_save.get(name_temp)
st.dataframe(df)
else:
st.markdown('<h4 style="color:#ce222f"> <b> Input Dataframe for reference </b> <h4>', unsafe_allow_html=True)
temp = titanic_pre(X)
st.dataframe(temp)
x_test = pipeline.transform(X)
output, unique, counts = prediction(x_test)
temp_2 = unique[counts.argmax()]
temp['Survived'] = temp_2
dataframe_save.put(name_temp, temp, data_columns=True)
dataframe_save.close()
if unique[counts.argmax()] == 1:
st.markdown(' <h4 style="color:#ce222f"> <b> RESULT: You would have Survived based on the input data </b> <h4>', unsafe_allow_html=True)
st.write(' Please refer the below dataframe for each model outputs: **0 -> Not Survived, 1 -> Survived** ', unsafe_allow_html=True)
st.dataframe(pd.DataFrame(output, index=['Voting_Classifier_Hard', 'Voting_Classifier_Soft', 'RandomForest'], columns=['Output']))
st.write('**Thanks for using the app**. Please press **"Refresh" button** to continue with new prediction. Also, please provide **feedback from sidebar options** once done')
if st.button('Refresh'):
state=False
elif unique[counts.argmax()] ==0:
st.write('<h4 style="color:#ce222f"> <b> RESULT: Sorry to Say, you would have not survived based on the input data </b> <h4>', unsafe_allow_html=True)
st.write('Please refer the below dataframe for each model outputs: **0 -> Not Survived, 1 -> Survived** ', unsafe_allow_html=True)
st.dataframe(pd.DataFrame(output, index=['Voting_Classifier_Hard', 'Voting_Classifier_Soft', 'RandomForest'], columns=['Output']))
st.write('**Thanks for using the app**. Please press **"Refresh" button** to continue with new prediction. Also, please provide **feedback from sidebar options** once done')
if st.button('Refresh'):
state=False
else:
st.write('invaild output - please start from beginning')
except:
st.write('Please enter ' + str(min_age_value) + ' to ' + str(max_age_value) +' in Age text box and between 5-500 in Fare text box. Please don\'t provide any string values')
else:
dataset = 'Titanic_Complete_Dataset.csv'
df = visualize_titanic_pre(pd.read_csv(dataset))
inp = st.sidebar.selectbox('', ['Titanic Prediction', 'Titanic Visualization'])
if inp == 'Titanic Visualization':
state = SessionState.get(flag=False)
st.sidebar.markdown('''# Welcome to Titanic Visualization Tool
### Below are the creator details
#### Name : <NAME> (<<EMAIL>>)
#### LinkedIn : <https://www.linkedin.com/in/john-pravin/>
#### GitHub : <https://github.com/JohnPravin97>
### Special thanks to
#### Name : <NAME> (<https://www.linkedin.com/in/aakash-nagarajan-28325510a/>)''')
#Main Coding
st.markdown('''<div align="center"> <h1> <b> Welcome to Titanic Visualization Tool </b> </h1> </div>''', unsafe_allow_html=True)
img = st.beta_columns(3)
img[1].image(Image.open('Titanic_Visualization.jpg'), width=425, caption='Titanic')
st.markdown('''<u>
<h3> <b> INTRODUCTION: </b> </h3> </u>''', unsafe_allow_html=True)
st.markdown('''This is a visual representation of the original titanic dataset, the plot represents the details of entries in the dataset. The details about attributes of these entries can be found in the visual representation.\n
''')
st.markdown('''<u>
<h3> <b> USER GUIDES: </b> </h3> </u>''', unsafe_allow_html=True)
st.markdown(''' Points to consider while working with this tool: \n
1. User can press me 'Visualize Me' button with the default selections to visualize the dataset.
2. Plotly library is used to make the visualization of original titanic dataset.
3. Thus, plotly features are available on the top right corner of the plot.
4. The Animations and Size columns has two options: 1. Pclass, 2. Sex and 1. SibSp, 2.Parch respectively.
5. User has to switch back to 'Titanic Prediction' for description of the columns.
6. User has to switch back to 'Titanic Prediction' for providing feedback and comments.
''')
st.markdown('''<u>
<h3> <b> LETS GET STARTED: </b> </h3> </u>''', unsafe_allow_html=True)
cols = st.beta_columns(6)
x_axis = cols[0].selectbox('X-axis', ['Age'])
y_axis = cols[1].selectbox('Y_axis', ['Fare'])
colors = cols[2].selectbox('colors', ['Survived'])
columns = cols[3].selectbox('Facet-Cols', ['Sex', 'Pclass'])
size = cols[5].selectbox('Size', ['Parch', 'SibSp'])
hover_name = 'Name'
hover_data = ['SibSp', 'Parch', 'Embarked']
if columns == 'Pclass':
animation = cols[4].selectbox('Animations', ['Sex'])
elif columns =='Sex':
animation = cols[4].selectbox('Animations', ['Pclass'])
if cols[3].button('Visualize Me'):
st.markdown('***Please wait while it is loading and click "Autoscale" in plotly to visualize it efficiently***')
st.plotly_chart(px.scatter(df, x=x_axis, y=y_axis, title='Titanic Visualization',size=size, color = colors, facet_col = columns, hover_name=hover_name, hover_data=hover_data, animation_frame=animation, height=600, width=1200))
else:
state = SessionState.get(flag=False)
st.sidebar.markdown('''# Welcome to Titanic Prediction Tool\n
### Below are the creator details
#### Name : <NAME> (<<EMAIL>>)\n
#### LinkedIn : <https://www.linkedin.com/in/john-pravin/>
#### GitHub : <https://github.com/JohnPravin97>
### Special thanks to
#### Name : <NAME> (<https://www.linkedin.com/in/aakash-nagarajan-28325510a/>)''')
feedback_save = | pd.HDFStore('feedback.h5') | pandas.HDFStore |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series( | range(10) | pandas.compat.range |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename, index_col=0)
df.reset_index(drop=True, inplace=True)
df = df[df['price'] >= 0]
df = df[df['sqft_living'] >= 0]
df = df[df['sqft_lot'] >= 0]
df = df[df['sqft_above'] >= 0]
df = df[df['sqft_basement'] >= 0]
df = df[df['yr_built'] >= 0]
df = df[(df['yr_built'] <= df['yr_renovated']) | (df['yr_renovated'] == 0)]
df['new_building'] = df['yr_built'].apply(lambda x: 1 if x >= 1990 else 0)
df['recently_renovated'] = df['yr_renovated'].apply(lambda x: 1 if x >= 1990 else 0)
df.drop(['date', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_lot15', 'sqft_living15'], axis=1, inplace=True)
df.dropna(inplace=True)
zip_data_frame = | pd.get_dummies(df['zipcode']) | pandas.get_dummies |
from mpl_toolkits import mplot3d
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from plotnine import *
import copy, math
dist = 10
def find_min_discm_each_hyperparam(df):
x = df.sort_values(by=['Discm_percent', 'Points-Removed']).groupby("Model-count", as_index=False).first()
assert len(x) == 240
return x
def process_rows(row, batches):
# global batches
model_count = 0
for perm in range(20):
for h1units in [16, 24, 32]:
for h2units in [8, 12]:
for batch in batches: # different batch sizes for this dataset
if perm == row['Dataperm'] and h1units == row['H1Units'] and h2units == row['H2Units'] and batch == row['Batch']:
return model_count
else:
model_count += 1
def process_dfs(name, batches, df):
# import ipdb; ipdb.set_trace()
if 'Model-count' in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
# df = df[['Model-count','Discm_percent','Test_acc']]
df = df[['Model-count','Discm_percent','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos']) * 100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x * 100)
df['Techniques'] = name
if len(name.split()) > 1:
words = name.split()
letters = [word[0] for word in words]
x = "".join(letters)
df['Baseline'] = x
else:
df['Baseline'] = name[:2]
return df
def boxplots_datasets(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos'])*100
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x*100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff']*100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
df_nosensitive['Discm_percent'] = 0.0
df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
# df_nosensitive = process_dfs("Sensitive Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv"))
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our])
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", '0.0' ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, 'min-discm.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, 'max-test-accuracy.tex', dataset)
if test_accuracy_for_min_discm:
# for sensitive there is always 0 discrimination.
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, 'test_accuracy_for_min_discm.tex', dataset)
if median_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
"0.0",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, 'discm_for_max_accuracy.tex', dataset)
return df_main
def boxplots_datasets_dist(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count', 'Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos']) * 100 # Statistical parity diff
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x * 100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
df_adver['Model-count'] = df_adver['Dataperm'] * 12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff'] * 100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
# df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = process_dfs("Sens. Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv"))
# df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
# df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
# df_nosensitive['Discm_percent'] = 0.0
# df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
# df_nosensitive['Techniques'] = "Sens. Removed"
# df_nosensitive['Baseline'] = "SR"
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_dist{dist}.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our], sort=True)
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", f"{df_nosensitive['Discm_percent'].min():.{precision}e}" ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, f'min-discm_dist{dist}.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, f'max-test-accuracy_dist{dist}.tex', dataset)
if test_accuracy_for_min_discm:
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Discm_percent'] == df_nosensitive['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, f'test_accuracy_for_min_discm_dist{dist}.tex', dataset)
if median_discm:
raise NotImplementedError
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
raise NotImplementedError
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
raise NotImplementedError
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, f'discm_for_max_accuracy_dist{dist}.tex', dataset)
return df_main
def print_to_tex(string, file, dataset, mode=None):
if mode == None:
if dataset == "adult":
mode = "w"
else:
mode = "a"
# with open(f"../../neurips_fairness_paper/tables/{file}", mode) as f:
with open(f"tables/{file}", mode) as f:
if dataset == "salary":
string += " \\\ \midrule"
else:
string += " \\\\ "
print(string, file=f)
# print(dataset)
# print("Min discm: ", df_DIR['Discm_percent'].min())
# print("Min discm: ", df_ps['Discm_percent'].min())
# print("Min discm: ", df_massaging['Discm_percent'].min())
# print("Min discm: ", df_lfr['Discm_percent'].min())
# print("Min discm: ", df_adver['Discm_percent'].min())
# print("Min discm: ", df_our['Discm_percent'].min())
def main(plot):
df_main = None
benchmarks = ["adult", "adult_race", "german", "student", "compas-ground", "compas-score", "default", "salary"]
for dataset in benchmarks:
# df_onedataset = boxplots_datasets(dataset, plot)
df_onedataset = boxplots_datasets_dist(dataset, plot)
if not df_main is None:
df_main = pd.concat([df_main, df_onedataset])
else:
df_main = copy.deepcopy(df_onedataset)
print(f"Done {dataset}")
if plot == 0:
return
labels = ['FU', 'SR', 'DIR', 'PS', 'MA', 'LFR', 'AD', 'Our']
tech_cat = pd.Categorical(df_main['Baseline'], categories=labels)
df_main = df_main.assign(Technique_x = tech_cat)
dataset_order = ["D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8"]
data_cat = pd.Categorical(df_main['Dataset'], categories=dataset_order)
df_main = df_main.assign(Dataset_x = data_cat)
# x = (ggplot(aes(x='Technique_x', y='Discm_percent', color='Techniques'), data=df_main) +\
# geom_boxplot() +\
# facet_wrap(['Dataset'], scales = 'free', nrow=2, labeller='label_both', shrink=False) + \
# ylab("Remaining Individual Discrimination") + \
# xlab("Discrimination reducing techniques") + \
# # ylim(0, 20) + \
# # ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
# theme(axis_text_x = element_text(size=6), dpi=151) + \
# theme_seaborn()
# )
# This is responsible for the legend - remove color='Techniques'
x = (ggplot(aes(x='Technique_x', y='Discm_percent'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Remaining Individual Discrimination") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 20) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
x = x.draw()
x.set_figwidth(20)
x.set_figheight(12)
for ax in range(len(benchmarks)):
low_limit = -0.05
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Discm_percent'].max()
if df_main[df_main['Dataset'] == f'D{ax+1}']['Discm_percent'].max() > 20:
top_limit = 20
if top_limit > 13: # These hacks are for aligning the 0 at the bottom of the plots.
low_limit = -0.3
x.axes[ax].set_ylim(low_limit, top_limit)
# x.tight_layout() # This didn't work
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
# x.save(f"boxplot_discm_freeaxis_matplotlib.png", height=8, width=18)
# x.save(f"boxplot_discm_freeaxis_withoutfull.png", height=12, width=15)
# x.save(f"boxplot_discm_fixedaxis.png", height=5, width=12)
y = (ggplot(aes(x='Technique_x', y='Test_acc'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Test Accuracy") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 100) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
# y.save(f"boxplot_accuracy_freeaxis.png", height=8, width=18)
y = y.draw()
y.set_figwidth(20)
y.set_figheight(12)
for ax in range(len(benchmarks)):
bot_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].min()
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].max()
y.axes[ax].set_ylim(bot_limit - 1, top_limit + 2)
# y.tight_layout()
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
def real_accuracy_tables(debiased):
dataset = "compas-score"
if debiased:
deb = "debiased"
else:
deb = "full"
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent']]
df_our_2 = pd.read_csv(f"{dataset}/results_our_real_accuracy_{deb}.csv")
df_our_final = pd.merge(df_our, df_our_2, on=['Model-count'])
df_our_final['Test_acc'] = df_our_final['Test_acc'].apply(lambda x: x*100)
df_our_final['Techniques'] = "Our Technique"
df_our_final['Baseline'] = "Our"
# import ipdb; ipdb.set_trace()
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
df_massaging.drop(columns=['Test_acc'], inplace=True)
df_massaging_2 = pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_real_accuracy_{deb}.csv")
df_massaging_final = pd.merge(df_massaging, df_massaging_2, on=['Model-count'])
df_massaging_final['Test_acc'] = df_massaging_final['Test_acc'].apply(lambda x: x*100)
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
df_ps.drop(columns=['Test_acc'], inplace=True)
df_ps_2 = pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_real_accuracy_{deb}.csv")
df_ps_final = pd.merge(df_ps, df_ps_2, on=['Model-count'])
df_ps_final['Test_acc'] = df_ps_final['Test_acc'].apply(lambda x: x*100)
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
df_lfr.drop(columns=['Test_acc'], inplace=True)
df_lfr_2 = pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_real_accuracy_{deb}.csv")
df_lfr_final = pd.merge(df_lfr, df_lfr_2, on=['Model-count'])
df_lfr_final['Test_acc'] = df_lfr_final['Test_acc'].apply(lambda x: x*100)
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
df_DIR.drop(columns=['Test_acc'], inplace=True)
df_DIR_2 = pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_real_accuracy_{deb}.csv")
df_DIR_final = pd.merge(df_DIR, df_DIR_2, on=['Model-count'])
df_DIR_final['Test_acc'] = df_DIR_final['Test_acc'].apply(lambda x: x*100)
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent']]
df_adver_2 = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_real_accuracy_{deb}.csv")
df_adver_2['Model-count'] = df_adver_2['Dataperm']*12
df_adver_final = pd.merge(df_adver, df_adver_2, on=['Model-count'])
df_adver_final['Test_acc'] = df_adver_final['Test_acc'].apply(lambda x: x*100)
df_adver_final['Techniques'] = "Adversa. debias"
df_adver_final['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
df_nosensitive = pd.read_csv(f"{dataset}/results_nosensitive_real_accuracy_{deb}.csv")
df_nosensitive = df_nosensitive[['Model-count','Test_acc']]
df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
# df_nosensitive = process_dfs("Sensitive Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv"))
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_noremoval.drop(columns=['Test_acc'], inplace=True)
df_noremoval_2 = pd.read_csv(f"{dataset}/results_noremoval_real_accuracy_{deb}.csv")
df_noremoval_final = pd.merge(df_noremoval, df_noremoval_2, on=['Model-count'])
df_noremoval_final['Test_acc'] = df_noremoval_final['Test_acc'].apply(lambda x: x*100)
max_accuracy = True
corresponding_max_accuracy = True
mean_accuracy = False
median_accuracy = False
id_ = "D5"
precision = 1
if corresponding_max_accuracy:
# for sensitive there is always 0 discrimination.
z = ' & '.join([f"{id_}", f"{df_noremoval_final.loc[df_noremoval_final['Discm_percent'] == df_noremoval_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive['Test_acc'].max():.{precision}e}",
f"{df_DIR_final.loc[df_DIR_final['Discm_percent'] == df_DIR_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps_final.loc[df_ps_final['Discm_percent'] == df_ps_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging_final.loc[df_massaging_final['Discm_percent'] == df_massaging_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr_final.loc[df_lfr_final['Discm_percent'] == df_lfr_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver_final.loc[df_adver_final['Discm_percent'] == df_adver_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our_final.loc[df_our_final['Discm_percent'] == df_our_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
a = ' & '.join([f"{id_}", f"{df_noremoval_final['Discm_percent'].min():.{precision}e}",
"0.0",
f"{df_DIR_final['Discm_percent'].min():.{precision}e}",
f"{df_ps_final['Discm_percent'].min():.{precision}e}",
f"{df_massaging_final['Discm_percent'].min():.{precision}e}",
f"{df_lfr_final['Discm_percent'].min():.{precision}e}",
f"{df_adver_final['Discm_percent'].min():.{precision}e}",
f"{df_our_final['Discm_percent'].min():.{precision}e}"])
print_to_tex(z, f'correspond-real-accuracy_{deb}.tex', dataset, "w")
print_to_tex(a, f'correspond-real-accuracy_{deb}.tex', dataset, "a")
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval_final['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR_final['Test_acc'].max():.{precision}e}", f"{df_ps_final['Test_acc'].max():.{precision}e}", f"{df_massaging_final['Test_acc'].max():.{precision}e}", f"{df_lfr_final['Test_acc'].max():.{precision}e}", f"{df_adver_final['Test_acc'].max():.{precision}e}", f"{df_our_final['Test_acc'].max():.{precision}e}"])
print_to_tex(y, f'max-real-accuracy_{deb}.tex', dataset, "w")
print("Done real accuracy")
def fpr_fnr_process_dfs(name, batches, df):
if 'Model-count'in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df = df[['Model-count','Discm_percent', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
df['FPR_diff'] = abs(df['Class0_FPR'] - df['Class1_FPR'])*100
# df_our['FPR_sum'] = df_our['Class0_FPR'] + df_our['Class1_FPR']
df['FPR_ratio'] = df['Class0_FPR'] / df['Class1_FPR']
df['FNR_diff'] = abs(df['Class0_FNR'] - df['Class1_FNR'])*100
# df_our['FNR_sum'] = df_our['Class0_FNR'] + df_our['Class1_FNR']
df['FNR_ratio'] = df['Class0_FNR'] / df['Class1_FNR']
# df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos'])*100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x*100)
df['Techniques'] = name
return df
def fpr_fnr_rates():
def fpr_fnr_print(dataset, id_, kind):
if kind:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
else:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_fulltest.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
# import ipdb; ipdb.set_trace()
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
df_our['FPR_diff'] = abs(df_our['Class0_FPR'] - df_our['Class1_FPR'])*100
# df_our['FPR_sum'] = df_our['Class0_FPR'] + df_our['Class1_FPR']
df_our['FPR_ratio'] = df_our['Class0_FPR'] / df_our['Class1_FPR']
df_our['FNR_diff'] = abs(df_our['Class0_FNR'] - df_our['Class1_FNR'])*100
# df_our['FNR_sum'] = df_our['Class0_FNR'] + df_our['Class1_FNR']
df_our['FNR_ratio'] = df_our['Class0_FNR'] / df_our['Class1_FNR']
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
if kind:
df_massaging = fpr_fnr_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_fulltest.csv"))
else:
df_massaging = fpr_fnr_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
if kind:
df_ps = fpr_fnr_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_fulltest.csv"))
else:
df_ps = fpr_fnr_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
if kind:
df_lfr = fpr_fnr_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_fulltest.csv"))
else:
df_lfr = fpr_fnr_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
if kind:
df_DIR = fpr_fnr_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_fulltest.csv"))
else:
df_DIR = fpr_fnr_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
if kind:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_fulltest.csv")
else:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
# df_adver = df_adver[['Model-count','Discm_percent', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
# df_adver['FPR_diff'] = abs(df_adver['Class0_FPR'] - df_adver['Class1_FPR'])*100
# # df_our['FPR_sum'] = df_our['Class0_FPR'] + df_our['Class1_FPR']
# df_adver['FPR_ratio'] = df_adver['Class0_FPR'] / df_adver['Class1_FPR']
# df_adver['FNR_diff'] = abs(df_adver['Class0_FNR'] - df_adver['Class1_FNR'])*100
# # df_our['FNR_sum'] = df_our['Class0_FNR'] + df_our['Class1_FNR']
# df_adver['FNR_ratio'] = df_adver['Class0_FNR'] / df_adver['Class1_FNR']
df_adver['FPR_diff'] = df_adver['FPR_ratio'] = df_adver['FNR_diff'] = df_adver['FNR_ratio'] = 1000.0
# df_adver['diff'] = df_adver['diff']*100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = df_nosensitive[['Model-count', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
df_nosensitive['FPR_diff'] = abs(df_nosensitive['Class0_FPR'] - df_nosensitive['Class1_FPR'])*100
# df_nosensitive['FPR_sum'] = df_nosensitive['Class0_FPR'] + df_nosensitive['Class1_FPR']
df_nosensitive['FPR_ratio'] = df_nosensitive['Class0_FPR'] / df_nosensitive['Class1_FPR']
df_nosensitive['FNR_diff'] = abs(df_nosensitive['Class0_FNR'] - df_nosensitive['Class1_FNR'])*100
# df_nosensitive['FNR_sum'] = df_nosensitive['Class0_FNR'] + df_nosensitive['Class1_FNR']
df_nosensitive['FNR_ratio'] = df_nosensitive['Class0_FNR'] / df_nosensitive['Class1_FNR']
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
if kind:
df_noremoval = fpr_fnr_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_fulltest.csv"))
else:
df_noremoval = fpr_fnr_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
min_rate_difference = True
rate_difference_for_min_discm = True
rate_difference_for_max_accuracy = True
precision = 1
if min_rate_difference:
a = ' & '.join([f"{id_}",
str(float(f"{df_noremoval['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_ps['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_adver['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_our['FPR_diff'].min():.{precision}e}"))])
b = ' & '.join([f"{id_}",
str(float(f"{df_noremoval['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_ps['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_adver['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_our['FNR_diff'].min():.{precision}e}"))])
# b = ' & '.join([f"{id_}", f"{df_nosensitive['FNR_diff'].min():.{precision}e}", "\\textbf{%s}"%(f"{df_our['FNR_diff'].min():.{precision}e}")])
# c = ' & '.join([f"{id_}", f"{df_nosensitive['FPR_ratio'].min():.{precision}e}", "\\textbf{%s}"%(f"{df_our['FPR_ratio'].min():.{precision}e}")])
# d = ' & '.join([f"{id_}", f"{df_nosensitive['FNR_ratio'].min():.{precision}e}", "\\textbf{%s}"%(f"{df_our['FNR_ratio'].min():.{precision}e}")])
# e = ' & '.join([f"{id_}", f"{df_nosensitive['Class0_FPR'].min():.{precision}e}", f"{df_nosensitive['Class1_FPR'].min():.{precision}e}", f"{df_our['Class0_FNR'].min():.{precision}e}", f"{df_our['Class1_FNR'].min():.{precision}e}"])
if kind:
print_to_tex(a, 'min-fpr_rate_fulltest.tex', dataset)
print_to_tex(b, 'min-fnr_rate_fulltest.tex', dataset)
else:
print_to_tex(a, 'min-fpr_rate_debiasedtest.tex', dataset)
print_to_tex(b, 'min-fnr_rate_debiasedtest.tex', dataset)
if rate_difference_for_min_discm:
x = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}"))])
y = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}"))])
# l = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_sum'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_sum'].min():.{precision}e}"])
# m = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_sum'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_sum'].min():.{precision}e}"])
# q = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_ratio'].min():.{precision}e}"])
# r = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_ratio'].min():.{precision}e}"])
# z = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_ratio'].min():.{precision}e}"])
# z1 = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_ratio'].min():.{precision}e}"])
# z2 = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Class0_FPR'].min():.{precision}e}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Class1_FPR'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Class0_FNR'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Class1_FNR'].min():.{precision}e}"])
if kind:
print_to_tex(x, 'fpr_rate-min-discm_fulltest.tex', dataset)
print_to_tex(y, 'fnr_rate-min-discm_fulltest.tex', dataset)
else:
print_to_tex(x, 'fpr_rate-min-discm_debiasedtest.tex', dataset)
print_to_tex(y, 'fnr_rate-min-discm_debiasedtest.tex', dataset)
if rate_difference_for_max_accuracy:
l = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['FPR_diff'].min():.{precision}e}"))])
m = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['FNR_diff'].min():.{precision}e}"))])
if kind:
print_to_tex(l, 'fpr_rate-max-accuracy_fulltest.tex', dataset)
print_to_tex(m, 'fnr_rate-max-accuracy_fulltest.tex', dataset)
else:
print_to_tex(l, 'fpr_rate-max-accuracy_debiasedtest.tex', dataset)
print_to_tex(m, 'fnr_rate-max-accuracy_debiasedtest.tex', dataset)
# df_main = None
benchmarks = ["adult", "adult_race", "german", "student", "compas-ground", "compas-score", "default", "salary"]
# benchmarks = ["adult", "german", "student", "compas-ground", "compas-score", "default"]
kind = "debiased"
# kind = "full"
for dataset in benchmarks:
if dataset == "adult":
id_ = "D1"
elif dataset == "adult_race":
id_ = "D2"
elif dataset == "german":
id_ = "D3"
elif dataset == "student":
id_ = "D4"
elif dataset == "compas-ground":
id_ = "D5"
elif dataset == "compas-score":
id_ = "D6"
elif dataset == "default":
id_ = "D7"
elif dataset == "salary":
id_ = "D8"
else:
raise NotImplementedError
if kind == "full":
fpr_fnr_print(dataset, id_, kind=True)
elif kind == "debiased":
fpr_fnr_print(dataset, id_, kind=False)
print(f"Done {dataset}")
def parity_process_dfs(name, batches, df):
if 'Model-count'in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df = df[['Model-count','Discm_percent','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos'])*100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x*100)
df['Techniques'] = name
if len(name.split()) > 1:
words = name.split()
letters = [word[0] for word in words]
x = "".join(letters)
df['Baseline'] = x
else:
df['Baseline'] = name[:2]
return df
def statistical_parity(dist_metric):
def parity_print(dataset, id_, kind, plot=False):
if kind:
if dist_metric:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_fulltest_dist{dist}.csv")
df2 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
else:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_fulltest.csv")
df2 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
else:
if dist_metric:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
else:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos']) * 100
if kind:
df_our2 = find_min_discm_each_hyperparam(df2) # since the sorting is on the basis of discm, it remains same
df_our2['Test_acc'] = df_our2['Test_acc'].apply(lambda x: x * 100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# import ipdb; ipdb.set_trace()
if kind:
if dist_metric:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_fulltest_dist{dist}.csv"))
df_massaging2 = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
else:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_fulltest.csv"))
df_massaging2 = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
else:
if dist_metric:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
else:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
if kind:
if dist_metric:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_fulltest_dist{dist}.csv"))
df_ps2 = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
else:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_fulltest.csv"))
df_ps2 = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
else:
if dist_metric:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
else:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
if kind:
if dist_metric:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_fulltest_dist{dist}.csv"))
df_lfr2 = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
else:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_fulltest.csv"))
df_lfr2 = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
else:
if dist_metric:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
else:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
if kind:
if dist_metric:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_fulltest_dist{dist}.csv"))
df_DIR2 = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
else:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_fulltest.csv"))
df_DIR2 = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
else:
if dist_metric:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
else:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
if kind:
if dist_metric:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_fulltest_dist{dist}.csv")
df_adver2 = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
else:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_fulltest.csv")
df_adver2 = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
else:
if dist_metric:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
else:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm'] * 12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver['diff'] = df_adver['diff'] * 100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x * 100)
if kind:
df_adver2['Model-count'] = df_adver2['Dataperm'] * 12
df_adver2 = df_adver2.sort_values("Discm_percent").groupby("Model-count", as_index=False).first()
df_adver2['Test_acc'] = df_adver2['Test_acc'].apply(lambda x: x * 100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
if kind:
if dist_metric:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_fulltest_dist{dist}.csv")
df_nosensitive2 = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv")
else:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_fulltest.csv")
df_nosensitive2 = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
else:
if dist_metric:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv")
else:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
# import ipdb; ipdb.set_trace()
if dist_metric:
df_nosensitive = df_nosensitive[['Model-count', 'Test_acc', 'Class0_Pos', 'Class1_Pos', 'Discm_percent']]
else:
df_nosensitive = df_nosensitive[['Model-count', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos']) * 100
if kind:
df_nosensitive2['Test_acc'] = df_nosensitive2['Test_acc'].apply(lambda x: x * 100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
if kind:
if dist_metric:
df_noremoval = parity_process_dfs("FULL", batches, | pd.read_csv(f"{dataset}/results_{dataset}_noremoval_fulltest_dist{dist}.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
""""Hinglish" Language - Modeling a MessyCode-Mixed Language.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1CYAvCp2GD-3anA2686ZTEja05gqTXCD_
"""
import pandas as pd
import matplotlib.pyplot as plt
df = | pd.read_csv("/content/drive/My Drive/NLP_Project/data/IIITH_Codemixed2.csv",names = ["Speech", "Labels"]) | pandas.read_csv |
from typing import Type, Callable, Tuple, Union
import numpy as np
import pandas as pd
import pytest
from py4j.java_gateway import JVMView
from keanu import set_deterministic_state
from keanu.context import KeanuContext
from keanu.vartypes import tensor_arg_types, primitive_types, numpy_types, pandas_types
from keanu.vertex import Gaussian, Const, UniformInt, Bernoulli, IntegerProxy, Double
from keanu.vertex.base import Vertex
@pytest.fixture
def jvm_view():
from py4j.java_gateway import java_import
jvm_view = KeanuContext().jvm_view()
java_import(jvm_view, "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.GaussianVertex")
return jvm_view
def assert_vertex_value_equals_scalar(vertex: Vertex, expected_type: Type, scalar: primitive_types) -> None:
vertex_value = vertex.get_value()
assert vertex_value == scalar
assert type(vertex_value) == numpy_types
assert vertex_value.shape == ()
assert vertex_value.dtype == expected_type
def assert_vertex_value_equals_ndarray(vertex: Vertex, expected_type: Type, ndarray: numpy_types) -> None:
vertex_value = vertex.get_value()
expected_value = ndarray.astype(expected_type)
assert np.array_equal(vertex_value, expected_value)
assert np.issubdtype(vertex_value.dtype, expected_type)
def assert_vertex_value_equals_pandas(vertex: Vertex, expected_type: Type, pandas: pandas_types) -> None:
get_value = vertex.get_value()
expected_value = pandas.values.astype(expected_type).reshape(get_value.shape)
assert np.array_equal(get_value, expected_value)
assert np.issubdtype(get_value.dtype, expected_type)
def test_can_pass_scalar_to_vertex() -> None:
gaussian = Gaussian(0., 1.)
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_ndarray_to_vertex() -> None:
gaussian = Gaussian(np.array([0.1, 0.4]), np.array([0.4, 0.5]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_pandas_dataframe_to_vertex() -> None:
gaussian = Gaussian(pd.DataFrame(data=[0.1, 0.4]), pd.DataFrame(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2, 1)
def test_can_pass_pandas_series_to_vertex() -> None:
gaussian = Gaussian(pd.Series(data=[0.1, 0.4]), pd.Series(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_vertex_to_vertex(jvm_view: JVMView) -> None:
mu = Gaussian(0., 1.)
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", mu, Const(1.))
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_array_to_vertex(jvm_view: JVMView) -> None:
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", [3, 3], Const(0.), Const(1.))
sample = gaussian.sample()
assert sample.shape == (3, 3)
def test_cannot_pass_generic_to_vertex(jvm_view: JVMView) -> None:
class GenericExampleClass:
pass
with pytest.raises(ValueError, match=r"Can't parse generic argument. Was given {}".format(GenericExampleClass)):
Vertex( # type: ignore # this is expected to fail mypy
jvm_view.GaussianVertex, "gaussian", GenericExampleClass(), GenericExampleClass())
def test_int_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1, 2], [3, 4]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.int64 or value.dtype == np.int32
assert (value == ndarray).all()
def test_float_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1., 2.], [3., 4.]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert (value == ndarray).all()
def test_boolean_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[True, True], [False, True]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.bool_
assert (value == ndarray).all()
def test_scalar_vertex_value_is_a_numpy_array() -> None:
scalar = 1.
vertex = Const(scalar)
value = vertex.get_value()
assert type(value) == numpy_types
assert value.shape == ()
assert value.dtype == float
assert value == scalar
def test_vertex_sample_is_a_numpy_array() -> None:
mu = np.array([[1., 2.], [3., 4.]])
sigma = np.array([[.1, .2], [.3, .4]])
vertex = Gaussian(mu, sigma)
value = vertex.sample()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert value.shape == (2, 2)
def test_get_connected_graph() -> None:
gaussian = Gaussian(0., 1.)
connected_graph = set(gaussian.iter_connected_graph())
assert len(connected_graph) == 3
def test_id_str_of_downstream_vertex_is_higher_than_upstream() -> None:
hyper_params = Gaussian(0., 1.)
gaussian = Gaussian(0., hyper_params)
hyper_params_id = hyper_params.get_id()
gaussian_id = gaussian.get_id()
assert type(hyper_params_id) == tuple
assert type(gaussian_id) == tuple
assert hyper_params_id < gaussian_id
def test_construct_vertex_with_java_vertex() -> None:
java_vertex = Gaussian(0., 1.).unwrap()
python_vertex = Vertex._from_java_vertex(java_vertex)
assert tuple(java_vertex.getId().getValue()) == python_vertex.get_id()
def test_java_collections_to_generator() -> None:
gaussian = Gaussian(0., 1.)
java_collections = gaussian.unwrap().getConnectedGraph()
python_list = list(Vertex._to_generator(java_collections))
java_vertex_ids = [Vertex._get_python_id(java_vertex) for java_vertex in java_collections]
assert java_collections.size() == len(python_list)
assert all(type(element) == Double and element.get_id() in java_vertex_ids for element in python_list)
def test_get_vertex_id() -> None:
gaussian = Gaussian(0., 1.)
java_id = gaussian.unwrap().getId().getValue()
python_id = gaussian.get_id()
assert all(value in python_id for value in java_id)
def test_ids_are_reset() -> None:
gaussian = Gaussian(0., 1.)
set_deterministic_state()
gaussian2 = Gaussian(0., 1.)
assert gaussian.get_id() == gaussian2.get_id()
@pytest.mark.parametrize("vertex, expected_type", [(Gaussian(0., 1.), np.floating), (UniformInt(0, 10), np.integer),
(Bernoulli(0.5), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1, 2, 3]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1., 2., 3.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True, False, False]), assert_vertex_value_equals_pandas),
( | pd.DataFrame(data=[[4]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 10:59:53 2018
@author: <NAME>
this code predicts if a passenger would survive or not from the titanic
ship wreck
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
dataset= pd.read_csv('train.csv')
#using seaborn to visualize colums with more missing values
null_value=sns.heatmap(dataset.isnull(), yticklabels=False, cmap='viridis', cbar=False)
#using seaborn to count the number of survivors based on sex
sns.countplot(x='Survived', hue='Sex', data=dataset)
#using seaborn to count the number of survivors based on class
sns.countplot(x='Survived', hue='Pclass', data=dataset)
#distribution plot of ages
dataset['Age'].plot.hist()
#using seaborn to count the sibling/spouse
sns.countplot(x='SibSp', data=dataset)
#distribution plot of fare
dataset['Fare'].plot.hist()
#find the missing age based on the Pclass
dataset.groupby('Pclass').mean()
def get_age(cols):
Age=cols[0]
Pclass=cols[1]
if pd.isnull(Age):
if Pclass ==1:
return 38
elif Pclass == 2:
return 29
else:
return 25
else:
return Age
dataset['Age'] = dataset[['Age', 'Pclass']].apply(get_age, axis=1)
#drop this column since there are so many missing values
dataset.drop('Cabin', axis=1, inplace=True)
#converting categorical variables to dummy or indicator variables
sex=pd.get_dummies(dataset['Sex'], drop_first=True)
embark= | pd.get_dummies(dataset['Embarked'], drop_first=True) | pandas.get_dummies |
"""
Outputs a .csv where columns are cluster name and rows are genes with values of the percent of cells that had one
of the marker genes expressed.
"""
import argparse
import pandas as pd
import scanpy as sc
import sys
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-ia',"--anndata", type=str, help="path to anndata object",
required=True
)
parser.add_argument('-cs', "--cluster_solution_name", type=str, help="name of the accessor to the cluster solution",
required=True
)
parser.add_argument('-o', "--output", type=str, help="name of the centroids .csv file ",
required=True
)
opts = parser.parse_args()
ad_path, cs_name, output = opts.anndata, opts.cluster_solution_name, opts.output
return ad_path, cs_name, output
def proportion_expressed_cluster(ad, cluster_solution_name):
"""
outputs a dataframe [genes x cluster] that is the percentage that gene is expressed in a cluster
:param ad: scanpy.Anndata
:param cluster_solution_name: string, key accessor for ad.obs cluster_solution (
:return: pandas.DataFrame
"""
cluster_solution = ad.obs[cluster_solution_name]
pcent_df = | pd.DataFrame(index=ad.var_names) | pandas.DataFrame |
# name matching using locality-sensitive hashing (simhash)
# these are mostly idempotent
from itertools import chain, repeat
from collections import defaultdict
from math import ceil
import numpy as np
import pandas as pd
import networkx as nx
from editdistance import eval as levenshtein
from tools.standardize import standardize_weak, standardize_strong
from tools.tables import read_csv
from tools.simhash import shingle, Cluster
# firm name sources - tag: (table, id_col, name_col)
colmap = {
'apply': ('apply_apply', 'appnum', 'appname'),
'grant': ('grant_grant', 'patnum', 'owner'),
'assignor': ('assign_use', 'assignid', 'assignor'),
'assignee': ('assign_use', 'assignid', 'assignee'),
'compustat': ('compustat', 'compid', 'name'),
}
# find all unique names
def generate_names(output, columns):
print('generating names')
sdict = {}
for tag, (table, id_col, name_col) in columns.items():
src = read_csv(f'{output}/{table}.csv', usecols=[id_col, name_col]).dropna()
src['name'] = src[name_col].apply(standardize_weak)
sdict[tag] = src
names = pd.concat([src['name'] for src in sdict.values()], axis=0).drop_duplicates()
names = names[names.str.len()>0].reset_index(drop=True)
names = names.rename('name').rename_axis('id').reset_index()
names.to_csv(f'{output}/name.csv', index=False)
for tag, (table, id_col, name_col) in columns.items():
src = pd.merge(sdict[tag], names, how='left', on='name')
src[[id_col, 'id']].to_csv(f'{output}/{tag}_match.csv', index=False)
print(f'found {len(names)} names')
# k = 8, thresh = 4 works well
def filter_pairs(output, nshingle=2, k=8, thresh=4):
print('filtering pairs')
c = Cluster(k=k, thresh=thresh)
name_dict = {}
names = read_csv(f'{output}/name.csv', usecols=['id', 'name'])
for i, id, name in names.itertuples():
words = name.split()
shings = list(shingle(name, nshingle))
features = shings + words
weights = list(np.linspace(1.0, 0.0, len(shings))) + list(np.linspace(1.0, 0.0, len(words)))
c.add(features, weights=weights, label=id)
name_dict[id] = name
if i > 0 and i % 100_000 == 0:
print(f'{i}: {len(c.unions)}')
pairs = pd.DataFrame([(i1, i2, name_dict[i1], name_dict[i2]) for i1, i2 in c.unions], columns=['id1', 'id2', 'name1', 'name2'])
pairs.to_csv(f'{output}/pair.csv', index=False)
print('Found %i pairs' % len(pairs))
# compute distances on owners in same cluster
def find_groups(output, thresh=0.85):
print('finding matches')
def dmetr(name1, name2):
max_len = max(len(name1), len(name2))
max_dist = int(ceil(max_len*(1.0-thresh)))
ldist = levenshtein(name1, name2)
return (1.0 - float(ldist)/max_len) if (ldist != -1 and max_len != 0) else 0.0
close = []
name_std = {}
pairs = read_csv(f'{output}/pair.csv', usecols=['id1', 'id2', 'name1', 'name2'])
for i, id1, id2, name1, name2 in pairs.itertuples():
if id1 not in name_std:
name_std[id1] = standardize_strong(name1)
if id2 not in name_std:
name_std[id2] = standardize_strong(name2)
n1std = name_std[id1]
n2std = name_std[id2]
if dmetr(n1std, n2std) > thresh:
close.append((id1, id2))
if i > 0 and i % 100_000 == 0:
print(f'{i}: {len(close)}')
G = nx.Graph()
G.add_edges_from(close)
comps = sorted(nx.connected_components(G), key=len, reverse=True)
match = pd.DataFrame(chain(*[zip(repeat(fid), ids) for fid, ids in enumerate(comps)]), columns=['firm_num', 'id'])
match.to_csv(f'{output}/match.csv', index=False)
print(f'found {len(comps)} groups')
# must be less than 1000000 components
def merge_firms(output, columns, base=1000000):
print('merging firms')
names = read_csv(f'{output}/name.csv')
match = read_csv(f'{output}/match.csv')
firms = pd.merge(names, match, how='left', on='id')
firms['firm_num'] = firms['firm_num'].fillna(firms['id']+base).astype(np.int)
firms[['firm_num', 'id']].to_csv(f'{output}/firm.csv', index=False)
for tag, (table, id_col, name_col) in columns.items():
src = read_csv(f'{output}/{tag}_match.csv')
src = | pd.merge(src, firms, on='id') | pandas.merge |
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pandas as pd
import pytest
from causalnex.evaluation import classification_report, roc_auc
from causalnex.inference import InferenceEngine
from causalnex.network import BayesianNetwork
from causalnex.structure import StructureModel
from causalnex.structure.notears import from_pandas
from causalnex.utils.network_utils import get_markov_blanket
from .estimator.test_em import naive_bayes_plus_parents
class TestFitNodeStates:
"""Test behaviour of fit node states method"""
@pytest.mark.parametrize(
"weighted_edges, data",
[
([("a", "b", 1)], pd.DataFrame([[1, 1]], columns=["a", "b"])),
(
[("a", "b", 1)],
| pd.DataFrame([[1, 1, 1, 1]], columns=["a", "b", "c", "d"]) | pandas.DataFrame |
import numpy as np
import sys
import math
import pandas as pd
sys.path.append('../../')
import bayesim.model as bym
import bayesim.params as byp
import deepdish as dd
import matplotlib.pyplot as plt
# First, define our model
def model_y(ec, params):
"""
Simple model of a kinematic trajectory.
Args:
ec (`dict`): dictionary with a key 't' leading to a float value
params (`dict`): dictionary with keys 'v0' and 'g' leading to float values
Returns:
float: y-value at the given time assuming the given parameters
"""
# read in the inputs
t = ec['t']
v0 = params['v0']
g = params['g']
# compute and return
return v0*t - 0.5 * g * t**2
# describe our parameters
pl = byp.Param_list()
pl.add_fit_param(name='v0', val_range=[0,20], units='m/s')
pl.add_fit_param(name='g', val_range=[0,20], units='m/s^2')
pl.add_ec(name='t', units='s', is_x=True) # plot this on the x-axis
pl.add_output(name='y', units='m')
# the first two "observations"
data = pd.DataFrame()
data['t'] = [2, 2.3]
data['y'] = [3, 0.1]
data['uncertainty'] = [0.2, 0.5]
dd.io.save('two_points.h5', data)
# initialize bayesim model object
m = bym.Model(params=pl, obs_data_path='two_points.h5', model_data_func=model_y, calc_model_unc=True)
m.run()
m.visualize_probs(fpath='two_obs_probs.png')
# now let's add some more "observed" data by just generating it using our model function and some parameter values consistent with what we've done so far
data = pd.DataFrame()
t_vals = np.arange(0,3,0.1)
y_vals = [model_y({'t':t},{'v0':11.31,'g':9.81}) for t in t_vals]
data['t'] = t_vals
data['y'] = y_vals
dd.io.save('obs_data.h5',data)
# initialize bayesim model object again, now with more data (and assuming the larger uncertainty value for all the points)
m = bym.Model(params=pl, obs_data_path='obs_data.h5', model_data_func=model_y, calc_model_unc=True, fixed_unc=0.5)
# run, using all data points
m.run(min_num_pts=len(m.obs_data))
m.visualize_probs(fpath='probs_1.png')
m.comparison_plot(fpath='comp_1.png')
# now subdivide, do further "simulations", and run inference again
m.subdivide()
new_pts = dd.io.load('new_sim_points_1.h5')
new_sims = []
for pt in new_pts.iterrows():
t = pt[1]['t']
params = pt[1][m.fit_param_names()]
y = model_y({'t':t}, params)
this_pt = [t, y] + [pt[1][n] for n in m.fit_param_names()]
new_sims.append(this_pt)
columns = ['t', 'y'] + [n for n in m.fit_param_names()]
new_sim_data = | pd.DataFrame.from_records(data=new_sims, columns=columns) | pandas.DataFrame.from_records |
#!/usr/bin/env python3
'''
open large dataframe and sort into smaller dataframes for plotting
'''
import os
import pandas as pd
from dp_cmip_tools import replace_mask_name, convert_to_df_time , save_df
Inputfiles=('CMIP6_ssp_tas-pr-rsds-snw-snc_seasonal_NG.csv',
'CMIP5_rcp_tas-pr-rsds-snw-snc_seasonal_NG.csv',
'CMIP6_ssp_tas-pr-rsds-snw-snc_seasonal_PRUDENCE.csv',
'CMIP5_rcp_tas-pr-rsds-snw-snc_seasonal_PRUDENCE.csv',
'CMIP6_ssp_tas-pr-rsds-snw-snc_seasonal_SREX_NEU_CEU_MED.csv',
'CMIP5_rcp_tas-pr-rsds-snw-snc_seasonal_SREX_NEU_CEU_MED.csv',
'EUR-11_CORDEX_rcp26-rcp45-rcp85_tas-pr_PRUDENCE_landonly_seasonal_fldmean.csv')
#Inputfiles=('EUR-11_CORDEX_rcp26-rcp45-rcp85_tas-pr_SREX-AR6-NEU-CEU-MED_landonly_seasonal_fldmean.csv',)
#Inputfiles=('EUR-11_CORDEX_rcp26-rcp45-rcp85_tas-pr-rsds-snw_NG_landonly_seasonal_fldmean.csv',)
print(os.getcwd())
workdir=os.getcwd()
#
# Select input data and output directory
#
Inputdir=workdir.replace('py_plotting_cmip_cordex','INPUT_DATA')
print(' ')
print('datafile is read from: ', Inputdir)
# Make Outputdir
Outdatadir=workdir.replace('py_plotting_cmip_cordex','SCATTER/data')
if not os.path.exists(Outdatadir):
os.makedirs(Outdatadir)
print(' ')
print('Output will be stored in : ', Outdatadir)
for Infile in Inputfiles:
filename=os.path.join(Inputdir,Infile)
print(' ')
print(' File which is processed:')
print(Infile)
# read dataframe
with open(filename, "r") as file:
df = pd.read_csv(file)
# prudence regions are numbers from 1 to 8 and get new abbreviations
df['mask'] = df['mask'].apply(str)
replace_mask_name(df)
# planed are plots for each region, season, exp, time-slice
for mip in df['project_id'].unique():
for mask in df['mask'].unique():
for season in df['season'].unique():
if mip != 'EURO-CORDEX':
for exp in ('historical',):
for time in ('1981-01-01 to 2010-12-31',):
dfhist=convert_to_df_time(df, mip, mask, season, time, exp)
#save dfhist for plotting
save_df(Outdatadir,dfhist,time,mip,mask,season,exp)
for exp in df['experiment_id'].unique():
if mip == 'EURO-CORDEX':
for time in ('1981-01-01 to 2010-12-31',):
dfhist=convert_to_df_time(df, mip, mask, season, time, exp)
save_df(Outdatadir,dfhist,time,mip,mask,season,exp)
if exp != 'historical':
for time in ('2036-01-01 to 2065-12-31','2070-01-01 to 2099-12-31'):
dfsce=convert_to_df_time(df, mip, mask, season, time, exp)
# join historical and Scenario
dfsce= | pd.concat([dfsce,dfhist],axis=1,join='inner',sort=False) | pandas.concat |
from __future__ import print_function
import os
import sys
###########################################################
# Change to your own library path
###########################################################
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
import pytz
# date_time format
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
ema_col = ['id', 'survey_type', 'delivered_ts', 'completed_ts', 'activity', 'location', 'atypical', 'stress',
'stressor_partner', 'stressor_fam', 'stressor_breakdown', 'stressor_money', 'stressor_selfcare', 'stressor_health',
'stressor_otherhealth', 'stressor_household', 'stressor_child', 'stressor_discrimination', 'stressor_none',
'moststressful', 'moststressful_time', 'work_location', 'attend_fidam', 'attend_fidpm', 'attend_hasp',
'attend_pgy1did', 'attend_pgy2did', 'attend_pgy3did', 'attend_none', 'work_start', 'work_end',
'jobperformance', 'jobperformance_best', 'jobsatisfaction', 'sleepquant', 'sleepqual', 'alcoholuse',
'alcohol_total', 'tobaccouse', 'tobacco_total', 'physactivity', 'physactivity_total',
'workstressor_computer', 'workstressor_patientint', 'workstressor_conflict', 'workstressor_census',
'workstressor_late', 'workstressor_paged', 'workstressor_supervise', 'workstressor_admin',
'workstressor_diffcases', 'workstressor_death', 'charting', 'charting_total', 'coworkertrust',
'work_inperson', 'work_digital', 'support_inperson', 'support_digital', 'socialevents', 'hangouts', 'wellness']
pt = pytz.timezone('US/Pacific')
def make_dir(data_path):
if os.path.exists(data_path) is False:
os.mkdir(data_path)
def check_micu_data_valid(data_time, start_date1, end_date1, start_date2, end_date2):
cond1 = (pd.to_datetime(data_time) - pd.to_datetime(start_date1)).total_seconds() >= 0
cond2 = (pd.to_datetime(end_date1) + timedelta(days=1) - pd.to_datetime(data_time)).total_seconds() >= 0
cond3 = False
cond4 = False
if start_date2 != 'nan':
cond3 = (pd.to_datetime(data_time) - pd.to_datetime(start_date2)).total_seconds() >= 0
cond4 = (pd.to_datetime(end_date2) + timedelta(days=1) - pd.to_datetime(data_time)).total_seconds() >= 0
if (cond1 and cond2):
return True
elif (cond3 and cond4):
return True
else:
return False
if __name__ == '__main__':
# Read data root path
participant_info_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, 'participant-info'))
saving_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, 'tiles-phase2-opendataset'))
# id,started_ts,completed_ts,duration,weekcompleted,gender,traininglevel
# Phase1Training_IGTB.csv
save_igtb_df = pd.DataFrame()
study_period = pd.read_csv(os.path.join(participant_info_path, 'study-periods.csv'), index_col=0)
ema_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'p2_ema_public_5.21.csv'))
stress_coded_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'MostStressful_SDLS.csv'))
best_coded_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'PerformBest_SDLS.csv'))
atypical_coded_df = pd.read_csv(os.path.join(saving_path, 'surveys', 'Atypical_SDLS.csv'))
participant_list = list(study_period.index)
participant_list.sort()
micu_df = pd.read_csv(os.path.join(participant_info_path, 'p2_micuschedules_public_5.21.csv'), index_col=0)
micu_df = micu_df.dropna(subset=['MICU Start Date 1'])
final_ema_df = pd.DataFrame()
for id in participant_list:
participant_df = ema_df.loc[ema_df['id'] == id]
micu_start1 = pd.to_datetime(micu_df.loc[id, 'MICU Start Date 1']).strftime(date_time_format)[:-3]
micu_end1 = pd.to_datetime(micu_df.loc[id, 'MICU End Date 1']).strftime(date_time_format)[:-3]
micu_start2 = str(micu_df.loc[id, 'MICU Start Date 2'])
micu_end2 = str(micu_df.loc[id, 'MICU End Date 2'])
if 'e7dc' in id:
print()
if str(micu_start2) != 'nan':
micu_start2 = | pd.to_datetime(micu_start2) | pandas.to_datetime |
import json
import logging
import pdb
import numpy as np
import pandas as pd
import arrow
from datetime import datetime
from bd3client.CentralService import CentralService
from bd3client.Sensor import Sensor
from bd3client.Timeseries import Timeseries
from building_depot import DataService, BDError
PST = 'US/Pacific'
# Basic configuration
begin_time_bd2_1 = arrow.get(datetime(2017,1,20), PST).datetime
end_time_bd2_1 = arrow.get(datetime(2017,2,8), PST).datetime
#begin_time_bd3 = arrow.get(datetime(2017,2,1), PST).datetime
#end_time_bd3 = arrow.get(datetime(2017,2,20), PST).datetime
begin_time_bd2_2 = arrow.get(datetime(2015,1,10), PST).datetime
end_time_bd2_2 = arrow.get(datetime(2017,5,10), PST).datetime
#building_name_list = ['AP_M']
#building_name_list = ['Music']
#building_name_list = ['AP_M', 'EBU3B', 'SME', 'Music']
building_name_list = ['BML']
basedir = "data"
header = ['value']
index_label="time"
# Logger configuration
logger = logging.getLogger("data_collection_log")
logger.setLevel(logging.INFO)
log_handler = logging.FileHandler('log/data_collection.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
logger.addHandler(log_handler)
# BD2-1 Initialization
with open("config/bd2-1config.json", "r") as fp:
hostname = json.load(fp)['hostname']
with open("config/bd2-1secrets.json", "r") as fp:
secrets = json.load(fp)
username = secrets['username']
apikey = secrets['apikey']
bd2_1ds = DataService(hostname, apikey, username)
# BD2-2 Initialization
with open("config/bd2-2config.json", "r") as fp:
hostname = json.load(fp)['hostname']
with open("config/bd2-2secrets.json", "r") as fp:
secrets = json.load(fp)
username = secrets['username']
apikey = secrets['apikey']
bd2_2ds = DataService(hostname, apikey, username)
# BD3 Initialization
with open("config/bd3config.json", "r") as fp:
hostname = json.load(fp)['hostname']
with open("config/bd3secrets.json", "r") as fp:
secrets = json.load(fp)
cid = secrets['cid']
csecret = secrets['csecret']
bd3cs = CentralService(hostname, cid, csecret)
bd3sensor_api = Sensor(bd3cs)
bd3ts_api = Timeseries(bd3cs)
#Load basic metadata
with open("metadata/bacnet_devices.json", "r") as fp:
bacnet_devices = json.load(fp)
with open("metadata/building_info.json", "r") as fp:
building_dict = json.load(fp)
def resample_data(raw_data, begin_time, end_time, sample_method):
raw_data = raw_data[begin_time:end_time]
if not begin_time in raw_data.index:
raw_data[arrow.get(begin_time).to('UTC').datetime] = raw_data.head(1)[0]
if not end_time in raw_data.index:
raw_data[arrow.get(end_time).to('UTC').datetime] = raw_data.tail(1)[0]
raw_data = raw_data.sort_index()
if sample_method == 'raw':
proc_data = raw_data
pass
elif sample_method == 'nextval':
proc_data = raw_data.resample('3Min', how='pad')
else:
logger.error("sample method not defined well: {0}".format(sample_method))
assert(False)
return proc_data
def get_data_bd2(begin_time, end_time, srcid, bd2ds):
# Get UUID
"""
try:
uuid = bd2ds.list_sensors({'source_identifier':srcid})\
['sensors'][0]['uuid']
except:
logger.error("UUID not found ({0})".format(srcid))
return None
"""
uuid = bd2ds.list_sensors({'source_identifier':srcid})\
['sensors'][0]['uuid']
# Get data
"""
try:
raw_data = bd2ds.get_timeseries_datapoints(uuid, 'PresentValue', \
begin_time, end_time)
except:
logger.error("Data not found ({0})".format(srcid))
return None
"""
raw_data = bd2ds.get_timeseries_datapoints(uuid, 'PresentValue', \
begin_time, end_time)
# pdb.set_trace()
time_list = list()
value_list = list()
for row in raw_data['timeseries']:
for t, v in row.items():
time_str = t
value = v
break
from dateutil import tz
time_list.append(arrow.get(time_str).to('UTC').datetime)
value_list.append(value)
return | pd.Series(index=time_list, data=value_list) | pandas.Series |
from unittest.mock import patch
import pandas as pd
import pytest
import woodwork as ww
@patch('evalml.pipelines.ClassificationPipeline._decode_targets', return_value=[0, 1])
@patch('evalml.objectives.BinaryClassificationObjective.decision_function', return_value= | pd.Series([1, 0]) | pandas.Series |
from datetime import datetime
import warnings
import pytest
import numpy as np
import pandas as pd
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, assert_dask_graph, make_meta
@pytest.mark.slow
def test_arithmetics():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
ddf1 = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
pdf1 = ddf1.compute()
pdf2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]})
pdf3 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]})
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
dsk4 = {('y', 0): pd.DataFrame({'a': [3, 2, 1], 'b': [7, 8, 9]},
index=[0, 1, 3]),
('y', 1): pd.DataFrame({'a': [5, 2, 8], 'b': [4, 2, 3]},
index=[5, 6, 8]),
('y', 2): pd.DataFrame({'a': [1, 4, 10], 'b': [1, 0, 5]},
index=[9, 9, 9])}
ddf4 = dd.DataFrame(dsk4, 'y', meta, [0, 4, 9, 9])
pdf4 = ddf4.compute()
# Arithmetics
cases = [(ddf1, ddf1, pdf1, pdf1),
(ddf1, ddf1.repartition([0, 1, 3, 6, 9]), pdf1, pdf1),
(ddf2, ddf3, pdf2, pdf3),
(ddf2.repartition([0, 3, 6, 7]), ddf3.repartition([0, 7]),
pdf2, pdf3),
(ddf2.repartition([0, 7]), ddf3.repartition([0, 2, 4, 5, 7]),
pdf2, pdf3),
(ddf1, ddf4, pdf1, pdf4),
(ddf1, ddf4.repartition([0, 9]), pdf1, pdf4),
(ddf1.repartition([0, 3, 9]), ddf4.repartition([0, 5, 9]),
pdf1, pdf4),
# dask + pandas
(ddf1, pdf4, pdf1, pdf4), (ddf2, pdf3, pdf2, pdf3)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b)
check_frame_arithmetics(l, r, el, er)
# different index, pandas raises ValueError in comparison ops
pdf5 = pd.DataFrame({'a': [3, 2, 1, 5, 2, 8, 1, 4, 10],
'b': [7, 8, 9, 4, 2, 3, 1, 0, 5]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [3, 2, 1, 5, 2, 8, 1, 4, 10],
'b': [7, 8, 9, 5, 7, 8, 4, 2, 5]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 9])
ddf6 = dd.from_pandas(pdf6, 4)
pdf7 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]},
index=list('aaabcdeh'))
pdf8 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]},
index=list('abcdefgh'))
ddf7 = dd.from_pandas(pdf7, 3)
ddf8 = dd.from_pandas(pdf8, 4)
pdf9 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4],
'c': [5, 6, 7, 8, 1, 2, 3, 4]},
index=list('aaabcdeh'))
pdf10 = pd.DataFrame({'b': [5, 6, 7, 8, 4, 3, 2, 1],
'c': [2, 4, 5, 3, 4, 2, 1, 0],
'd': [2, 4, 5, 3, 4, 2, 1, 0]},
index=list('abcdefgh'))
ddf9 = dd.from_pandas(pdf9, 3)
ddf10 = dd.from_pandas(pdf10, 4)
# Arithmetics with different index
cases = [(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 9]), ddf6, pdf5, pdf6),
(ddf5.repartition([0, 5, 9]), ddf6.repartition([0, 7, 9]),
pdf5, pdf6),
(ddf7, ddf8, pdf7, pdf8),
(ddf7.repartition(['a', 'c', 'h']), ddf8.repartition(['a', 'h']),
pdf7, pdf8),
(ddf7.repartition(['a', 'b', 'e', 'h']),
ddf8.repartition(['a', 'e', 'h']), pdf7, pdf8),
(ddf9, ddf10, pdf9, pdf10),
(ddf9.repartition(['a', 'c', 'h']), ddf10.repartition(['a', 'h']),
pdf9, pdf10),
# dask + pandas
(ddf5, pdf6, pdf5, pdf6), (ddf7, pdf8, pdf7, pdf8),
(ddf9, pdf10, pdf9, pdf10)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b,
allow_comparison_ops=False)
check_frame_arithmetics(l, r, el, er,
allow_comparison_ops=False)
def test_deterministic_arithmetic_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted((a.x + a.y ** 2).dask) == sorted((a.x + a.y ** 2).dask)
assert sorted((a.x + a.y ** 2).dask) != sorted((a.x + a.y ** 3).dask)
assert sorted((a.x + a.y ** 2).dask) != sorted((a.x - a.y ** 2).dask)
@pytest.mark.slow
def test_arithmetics_different_index():
# index are different, but overwraps
pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
index=[1, 2, 3, 4, 5])
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
index=[3, 4, 5, 6, 7])
ddf2 = dd.from_pandas(pdf2, 2)
# index are not overwrapped
pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
index=[1, 2, 3, 4, 5])
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
index=[10, 11, 12, 13, 14])
ddf4 = dd.from_pandas(pdf4, 2)
# index is included in another
pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
index=[1, 3, 5, 7, 9])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
index=[2, 3, 4, 5, 6])
ddf6 = dd.from_pandas(pdf6, 2)
cases = [(ddf1, ddf2, pdf1, pdf2),
(ddf2, ddf1, pdf2, pdf1),
(ddf1.repartition([1, 3, 5]), ddf2.repartition([3, 4, 7]),
pdf1, pdf2),
(ddf2.repartition([3, 4, 5, 7]), ddf1.repartition([1, 2, 4, 5]),
pdf2, pdf1),
(ddf3, ddf4, pdf3, pdf4),
(ddf4, ddf3, pdf4, pdf3),
(ddf3.repartition([1, 2, 3, 4, 5]),
ddf4.repartition([10, 11, 12, 13, 14]), pdf3, pdf4),
(ddf4.repartition([10, 14]), ddf3.repartition([1, 3, 4, 5]),
pdf4, pdf3),
(ddf5, ddf6, pdf5, pdf6),
(ddf6, ddf5, pdf6, pdf5),
(ddf5.repartition([1, 7, 8, 9]), ddf6.repartition([2, 3, 4, 6]),
pdf5, pdf6),
(ddf6.repartition([2, 6]), ddf5.repartition([1, 3, 7, 9]),
pdf6, pdf5),
# dask + pandas
(ddf1, pdf2, pdf1, pdf2), (ddf2, pdf1, pdf2, pdf1),
(ddf3, pdf4, pdf3, pdf4), (ddf4, pdf3, pdf4, pdf3),
(ddf5, pdf6, pdf5, pdf6), (ddf6, pdf5, pdf6, pdf5)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b,
allow_comparison_ops=False)
check_frame_arithmetics(l, r, el, er,
allow_comparison_ops=False)
pdf7 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]},
index=[0, 2, 4, 8, 9, 10, 11, 13])
pdf8 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]},
index=[1, 3, 4, 8, 9, 11, 12, 13])
ddf7 = dd.from_pandas(pdf7, 3)
ddf8 = dd.from_pandas(pdf8, 2)
pdf9 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]},
index=[0, 2, 4, 8, 9, 10, 11, 13])
pdf10 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]},
index=[0, 3, 4, 8, 9, 11, 12, 13])
ddf9 = dd.from_pandas(pdf9, 3)
ddf10 = dd.from_pandas(pdf10, 2)
cases = [(ddf7, ddf8, pdf7, pdf8),
(ddf8, ddf7, pdf8, pdf7),
# (ddf7.repartition([0, 13]),
# ddf8.repartition([0, 4, 11, 14], force=True),
# pdf7, pdf8),
(ddf8.repartition([-5, 10, 15], force=True),
ddf7.repartition([-1, 4, 11, 14], force=True), pdf8, pdf7),
(ddf7.repartition([0, 8, 12, 13]),
ddf8.repartition([0, 2, 8, 12, 13], force=True), pdf7, pdf8),
(ddf8.repartition([-5, 0, 10, 20], force=True),
ddf7.repartition([-1, 4, 11, 13], force=True), pdf8, pdf7),
(ddf9, ddf10, pdf9, pdf10),
(ddf10, ddf9, pdf10, pdf9),
# dask + pandas
(ddf7, pdf8, pdf7, pdf8), (ddf8, pdf7, pdf8, pdf7),
(ddf9, pdf10, pdf9, pdf10), (ddf10, pdf9, pdf10, pdf9)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b,
allow_comparison_ops=False)
check_frame_arithmetics(l, r, el, er,
allow_comparison_ops=False)
def check_series_arithmetics(l, r, el, er, allow_comparison_ops=True):
assert isinstance(l, dd.Series)
assert isinstance(r, (dd.Series, pd.Series))
assert isinstance(el, pd.Series)
assert isinstance(er, pd.Series)
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l + r, el + er)
assert_eq(l * r, el * er)
assert_eq(l - r, el - er)
assert_eq(l / r, el / er)
assert_eq(l // r, el // er)
assert_eq(l ** r, el ** er)
assert_eq(l % r, el % er)
if allow_comparison_ops:
# comparison is allowed if data have same index
assert_eq(l & r, el & er)
assert_eq(l | r, el | er)
assert_eq(l ^ r, el ^ er)
assert_eq(l > r, el > er)
assert_eq(l < r, el < er)
assert_eq(l >= r, el >= er)
assert_eq(l <= r, el <= er)
assert_eq(l == r, el == er)
assert_eq(l != r, el != er)
assert_eq(l.lt(r), el.lt(er))
assert_eq(l.gt(r), el.gt(er))
assert_eq(l.le(r), el.le(er))
assert_eq(l.ge(r), el.ge(er))
assert_eq(l.ne(r), el.ne(er))
assert_eq(l.eq(r), el.eq(er))
assert_eq(l + 2, el + 2)
assert_eq(l * 2, el * 2)
assert_eq(l - 2, el - 2)
assert_eq(l / 2, el / 2)
assert_eq(l & True, el & True)
assert_eq(l | True, el | True)
assert_eq(l ^ True, el ^ True)
assert_eq(l // 2, el // 2)
assert_eq(l ** 2, el ** 2)
assert_eq(l % 2, el % 2)
assert_eq(l > 2, el > 2)
assert_eq(l < 2, el < 2)
assert_eq(l >= 2, el >= 2)
assert_eq(l <= 2, el <= 2)
assert_eq(l == 2, el == 2)
assert_eq(l != 2, el != 2)
assert_eq(2 + r, 2 + er)
assert_eq(2 * r, 2 * er)
assert_eq(2 - r, 2 - er)
assert_eq(2 / r, 2 / er)
assert_eq(True & r, True & er)
assert_eq(True | r, True | er)
assert_eq(True ^ r, True ^ er)
assert_eq(2 // r, 2 // er)
assert_eq(2 ** r, 2 ** er)
assert_eq(2 % r, 2 % er)
assert_eq(2 > r, 2 > er)
assert_eq(2 < r, 2 < er)
assert_eq(2 >= r, 2 >= er)
assert_eq(2 <= r, 2 <= er)
assert_eq(2 == r, 2 == er)
assert_eq(2 != r, 2 != er)
assert_eq(l.lt(2), el.lt(2))
assert_eq(l.gt(2), el.gt(2))
assert_eq(l.le(2), el.le(2))
assert_eq(l.ge(2), el.ge(2))
assert_eq(l.ne(2), el.ne(2))
assert_eq(l.eq(2), el.eq(2))
assert_eq(-l, -el)
assert_eq(abs(l), abs(el))
if allow_comparison_ops:
# comparison is allowed if data have same index
assert_eq(~(l == r), ~(el == er))
def check_frame_arithmetics(l, r, el, er, allow_comparison_ops=True):
assert isinstance(l, dd.DataFrame)
assert isinstance(r, (dd.DataFrame, pd.DataFrame))
assert isinstance(el, pd.DataFrame)
assert isinstance(er, pd.DataFrame)
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l + r, el + er)
assert_eq(l * r, el * er)
assert_eq(l - r, el - er)
assert_eq(l / r, el / er)
assert_eq(l // r, el // er)
assert_eq(l ** r, el ** er)
assert_eq(l % r, el % er)
if allow_comparison_ops:
# comparison is allowed if data have same index
assert_eq(l & r, el & er)
assert_eq(l | r, el | er)
assert_eq(l ^ r, el ^ er)
assert_eq(l > r, el > er)
assert_eq(l < r, el < er)
assert_eq(l >= r, el >= er)
assert_eq(l <= r, el <= er)
assert_eq(l == r, el == er)
assert_eq(l != r, el != er)
assert_eq(l.lt(r), el.lt(er))
assert_eq(l.gt(r), el.gt(er))
assert_eq(l.le(r), el.le(er))
assert_eq(l.ge(r), el.ge(er))
assert_eq(l.ne(r), el.ne(er))
assert_eq(l.eq(r), el.eq(er))
assert_eq(l + 2, el + 2)
assert_eq(l * 2, el * 2)
assert_eq(l - 2, el - 2)
assert_eq(l / 2, el / 2)
assert_eq(l & True, el & True)
assert_eq(l | True, el | True)
assert_eq(l ^ True, el ^ True)
assert_eq(l // 2, el // 2)
assert_eq(l ** 2, el ** 2)
assert_eq(l % 2, el % 2)
assert_eq(l > 2, el > 2)
assert_eq(l < 2, el < 2)
assert_eq(l >= 2, el >= 2)
assert_eq(l <= 2, el <= 2)
assert_eq(l == 2, el == 2)
assert_eq(l != 2, el != 2)
assert_eq(2 + l, 2 + el)
assert_eq(2 * l, 2 * el)
assert_eq(2 - l, 2 - el)
assert_eq(2 / l, 2 / el)
assert_eq(True & l, True & el)
assert_eq(True | l, True | el)
assert_eq(True ^ l, True ^ el)
assert_eq(2 // l, 2 // el)
assert_eq(2 ** l, 2 ** el)
assert_eq(2 % l, 2 % el)
assert_eq(2 > l, 2 > el)
assert_eq(2 < l, 2 < el)
assert_eq(2 >= l, 2 >= el)
assert_eq(2 <= l, 2 <= el)
assert_eq(2 == l, 2 == el)
assert_eq(2 != l, 2 != el)
assert_eq(l.lt(2), el.lt(2))
assert_eq(l.gt(2), el.gt(2))
assert_eq(l.le(2), el.le(2))
assert_eq(l.ge(2), el.ge(2))
assert_eq(l.ne(2), el.ne(2))
assert_eq(l.eq(2), el.eq(2))
assert_eq(-l, -el)
assert_eq(abs(l), abs(el))
if allow_comparison_ops:
# comparison is allowed if data have same index
assert_eq(~(l == r), ~(el == er))
def test_scalar_arithmetics():
el = np.int64(10)
er = np.int64(4)
l = dd.core.Scalar({('l', 0): el}, 'l', 'i8')
r = dd.core.Scalar({('r', 0): er}, 'r', 'i8')
assert isinstance(l, dd.core.Scalar)
assert isinstance(r, dd.core.Scalar)
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l + r, el + er)
assert_eq(l * r, el * er)
assert_eq(l - r, el - er)
assert_eq(l / r, el / er)
assert_eq(l // r, el // er)
assert_eq(l ** r, el ** er)
assert_eq(l % r, el % er)
assert_eq(l & r, el & er)
assert_eq(l | r, el | er)
assert_eq(l ^ r, el ^ er)
assert_eq(l > r, el > er)
assert_eq(l < r, el < er)
assert_eq(l >= r, el >= er)
assert_eq(l <= r, el <= er)
assert_eq(l == r, el == er)
assert_eq(l != r, el != er)
assert_eq(l + 2, el + 2)
assert_eq(l * 2, el * 2)
assert_eq(l - 2, el - 2)
assert_eq(l / 2, el / 2)
assert_eq(l & True, el & True)
assert_eq(l | True, el | True)
assert_eq(l ^ True, el ^ True)
assert_eq(l // 2, el // 2)
assert_eq(l ** 2, el ** 2)
assert_eq(l % 2, el % 2)
assert_eq(l > 2, el > 2)
assert_eq(l < 2, el < 2)
assert_eq(l >= 2, el >= 2)
assert_eq(l <= 2, el <= 2)
assert_eq(l == 2, el == 2)
assert_eq(l != 2, el != 2)
assert_eq(2 + r, 2 + er)
assert_eq(2 * r, 2 * er)
assert_eq(2 - r, 2 - er)
assert_eq(2 / r, 2 / er)
assert_eq(True & r, True & er)
assert_eq(True | r, True | er)
assert_eq(True ^ r, True ^ er)
assert_eq(2 // r, 2 // er)
assert_eq(2 ** r, 2 ** er)
assert_eq(2 % r, 2 % er)
assert_eq(2 > r, 2 > er)
assert_eq(2 < r, 2 < er)
assert_eq(2 >= r, 2 >= er)
assert_eq(2 <= r, 2 <= er)
assert_eq(2 == r, 2 == er)
assert_eq(2 != r, 2 != er)
assert_eq(-l, -el)
assert_eq(abs(l), abs(el))
assert_eq(~(l == r), ~(el == er))
def test_scalar_arithmetics_with_dask_instances():
s = dd.core.Scalar({('s', 0): 10}, 's', 'i8')
e = 10
pds = pd.Series([1, 2, 3, 4, 5, 6, 7])
dds = dd.from_pandas(pds, 2)
pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(pdf, 2)
# pandas Series
result = pds + s # this result pd.Series (automatically computed)
assert isinstance(result, pd.Series)
assert_eq(result, pds + e)
result = s + pds # this result dd.Series
assert isinstance(result, dd.Series)
assert_eq(result, pds + e)
# dask Series
result = dds + s # this result dd.Series
assert isinstance(result, dd.Series)
assert_eq(result, pds + e)
result = s + dds # this result dd.Series
assert isinstance(result, dd.Series)
assert_eq(result, pds + e)
# pandas DataFrame
result = pdf + s # this result pd.DataFrame (automatically computed)
assert isinstance(result, pd.DataFrame)
assert_eq(result, pdf + e)
result = s + pdf # this result dd.DataFrame
assert isinstance(result, dd.DataFrame)
assert_eq(result, pdf + e)
# dask DataFrame
result = ddf + s # this result dd.DataFrame
assert isinstance(result, dd.DataFrame)
assert_eq(result, pdf + e)
result = s + ddf # this result dd.DataFrame
assert isinstance(result, dd.DataFrame)
assert_eq(result, pdf + e)
def test_frame_series_arithmetic_methods():
pdf1 = pd.DataFrame({'A': np.arange(10),
'B': [np.nan, 1, 2, 3, 4] * 2,
'C': [np.nan] * 10,
'D': np.arange(10)},
index=list('abcdefghij'), columns=list('ABCD'))
pdf2 = pd.DataFrame(np.random.randn(10, 4),
index=list('abcdefghjk'), columns=list('ABCX'))
ps1 = pdf1.A
ps2 = pdf2.A
ps3 = pd.Series(np.random.randn(10), index=list('ABCDXabcde'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 2)
ds1 = ddf1.A
ds2 = ddf2.A
s = dd.core.Scalar({('s', 0): 4}, 's', 'i8')
for l, r, el, er in [(ddf1, ddf2, pdf1, pdf2), (ds1, ds2, ps1, ps2),
(ddf1.repartition(['a', 'f', 'j']), ddf2, pdf1, pdf2),
(ds1.repartition(['a', 'b', 'f', 'j']), ds2, ps1, ps2),
(ddf1, ddf2.repartition(['a', 'k']), pdf1, pdf2),
(ds1, ds2.repartition(['a', 'b', 'd', 'h', 'k']), ps1, ps2),
(ddf1, 3, pdf1, 3), (ds1, 3, ps1, 3),
(ddf1, s, pdf1, 4), (ds1, s, ps1, 4)]:
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
assert_eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
assert_eq(l, el)
assert_eq(r, er)
# must specify axis=0 to add Series to each column
# axis=1 is not supported (add to each row)
assert_eq(l.add(r, axis=0), el.add(er, axis=0))
assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))
assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))
assert_eq(l.div(r, axis=0), el.div(er, axis=0))
assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))
assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))
assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))
assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
assert_eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
pytest.raises(ValueError, lambda: l.add(r, axis=1))
for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:
assert_eq(l, el)
assert_eq(r, er)
for axis in [0, 1, 'index', 'columns']:
assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))
assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))
assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
assert_eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
@pytest.mark.parametrize('split_every', [False, 2])
def test_reductions(split_every):
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
ddf1 = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
pdf1 = ddf1.compute()
nans1 = pd.Series([1] + [np.nan] * 4 + [2] + [np.nan] * 3)
nands1 = dd.from_pandas(nans1, 2)
nans2 = pd.Series([1] + [np.nan] * 8)
nands2 = dd.from_pandas(nans2, 2)
nans3 = pd.Series([np.nan] * 9)
nands3 = dd.from_pandas(nans3, 2)
bools = pd.Series([True, False, True, False, True], dtype=bool)
boolds = dd.from_pandas(bools, 2)
for dds, pds in [(ddf1.b, pdf1.b), (ddf1.a, pdf1.a),
(ddf1['a'], pdf1['a']), (ddf1['b'], pdf1['b']),
(nands1, nans1), (nands2, nans2), (nands3, nans3),
(boolds, bools)]:
assert isinstance(dds, dd.Series)
assert isinstance(pds, pd.Series)
assert_eq(dds.sum(split_every=split_every), pds.sum())
assert_eq(dds.prod(split_every=split_every), pds.prod())
assert_eq(dds.min(split_every=split_every), pds.min())
assert_eq(dds.max(split_every=split_every), pds.max())
assert_eq(dds.count(split_every=split_every), pds.count())
with pytest.warns(None):
# runtime warnings; https://github.com/dask/dask/issues/2381
assert_eq(dds.std(split_every=split_every), pds.std())
with pytest.warns(None):
# runtime warnings; https://github.com/dask/dask/issues/2381
assert_eq(dds.var(split_every=split_every), pds.var())
with pytest.warns(None):
# runtime warnings; https://github.com/dask/dask/issues/2381
assert_eq(dds.sem(split_every=split_every), pds.sem())
assert_eq(dds.std(ddof=0, split_every=split_every), pds.std(ddof=0))
assert_eq(dds.var(ddof=0, split_every=split_every), pds.var(ddof=0))
assert_eq(dds.sem(ddof=0, split_every=split_every), pds.sem(ddof=0))
assert_eq(dds.mean(split_every=split_every), pds.mean())
assert_eq(dds.nunique(split_every=split_every), pds.nunique())
assert_eq(dds.sum(skipna=False, split_every=split_every),
pds.sum(skipna=False))
assert_eq(dds.prod(skipna=False, split_every=split_every),
pds.prod(skipna=False))
assert_eq(dds.min(skipna=False, split_every=split_every),
pds.min(skipna=False))
assert_eq(dds.max(skipna=False, split_every=split_every),
pds.max(skipna=False))
assert_eq(dds.std(skipna=False, split_every=split_every),
pds.std(skipna=False))
assert_eq(dds.var(skipna=False, split_every=split_every),
pds.var(skipna=False))
assert_eq(dds.sem(skipna=False, split_every=split_every),
pds.sem(skipna=False))
assert_eq(dds.std(skipna=False, ddof=0, split_every=split_every),
pds.std(skipna=False, ddof=0))
assert_eq(dds.var(skipna=False, ddof=0, split_every=split_every),
pds.var(skipna=False, ddof=0))
assert_eq(dds.sem(skipna=False, ddof=0, split_every=split_every),
pds.sem(skipna=False, ddof=0))
assert_eq(dds.mean(skipna=False, split_every=split_every),
pds.mean(skipna=False))
assert_dask_graph(ddf1.b.sum(split_every=split_every), 'series-sum')
assert_dask_graph(ddf1.b.prod(split_every=split_every), 'series-prod')
assert_dask_graph(ddf1.b.min(split_every=split_every), 'series-min')
assert_dask_graph(ddf1.b.max(split_every=split_every), 'series-max')
assert_dask_graph(ddf1.b.count(split_every=split_every), 'series-count')
assert_dask_graph(ddf1.b.std(split_every=split_every), 'series-std')
assert_dask_graph(ddf1.b.var(split_every=split_every), 'series-var')
assert_dask_graph(ddf1.b.sem(split_every=split_every), 'series-sem')
assert_dask_graph(ddf1.b.std(ddof=0, split_every=split_every), 'series-std')
assert_dask_graph(ddf1.b.var(ddof=0, split_every=split_every), 'series-var')
assert_dask_graph(ddf1.b.sem(ddof=0, split_every=split_every), 'series-sem')
assert_dask_graph(ddf1.b.mean(split_every=split_every), 'series-mean')
# nunique is performed using drop-duplicates
assert_dask_graph(ddf1.b.nunique(split_every=split_every), 'drop-duplicates')
# testing index
assert_eq(ddf1.index.min(split_every=split_every), pdf1.index.min())
assert_eq(ddf1.index.max(split_every=split_every), pdf1.index.max())
assert_eq(ddf1.index.count(split_every=split_every), | pd.notnull(pdf1.index) | pandas.notnull |
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pandas import testing as pdt
from rle_array.autoconversion import auto_convert_to_rle, decompress
from rle_array.dtype import RLEDtype
pytestmark = pytest.mark.filterwarnings("ignore:performance")
@pytest.mark.parametrize(
"orig, threshold, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
2.0,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series(
[1, 1, 1, 2, 2, 2], dtype=RLEDtype(np.int64)
),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.9,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series(
[1, 1, 1, 2, 2, 2], dtype=RLEDtype(np.int64)
),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.5,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.0,
# expected
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
# threshold
0.0,
# expected
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
),
(
# orig
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
# threshold
0.1,
# expected
pd.DataFrame({"x": pd.Series([], dtype=RLEDtype(np.int64))}),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
# threshold
0.5,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
),
(
# orig
pd.DataFrame({"x": pd.Series(range(10), dtype=np.int64)}),
# threshold
1.0,
# expected
pd.DataFrame({"x": pd.Series(range(10), dtype=np.int64)}),
),
(
# orig
pd.DataFrame(),
# threshold
None,
# expected
pd.DataFrame(),
),
],
)
@pytest.mark.filterwarnings("ignore:.*would use a DatetimeBlock:UserWarning")
def test_auto_convert_to_rle_ok(
orig: pd.DataFrame, threshold: Optional[float], expected: pd.DataFrame
) -> None:
actual = auto_convert_to_rle(orig, threshold)
pdt.assert_frame_equal(actual, expected)
def test_datetime_warns() -> None:
df = pd.DataFrame(
{
"i1": pd.Series([1], dtype=np.int64),
"d1": pd.Series([pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"),
"i2": pd.Series([1], dtype=np.int64),
"d2": pd.Series([pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"),
}
)
with pytest.warns(None) as record:
auto_convert_to_rle(df, 0.5)
assert len(record) == 2
assert (
str(record[0].message)
== "Column d1 would use a DatetimeBlock and can currently not be RLE compressed."
)
assert (
str(record[1].message)
== "Column d2 would use a DatetimeBlock and can currently not be RLE compressed."
)
def test_auto_convert_to_rle_threshold_out_of_range() -> None:
df = pd.DataFrame({"x": [1]})
with pytest.raises(ValueError, match=r"threshold \(-0.1\) must be non-negative"):
auto_convert_to_rle(df, -0.1)
@pytest.mark.parametrize(
"orig, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": | pd.Series([1], dtype=np.int32) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 18:10:18 2019
@author: <NAME>
Code will plot the keypoint coordinates vs time in order to assign the maximum
value from this plot to the real-world distance measurement. This will be
the label.
Meeting:
"""
import pandas as pd
import matplotlib.pyplot as plt
#Edit data within file.
#Open file and set to a certain variable
df = pd.read_csv('thesavedones.csv', header=None)
#Keypoint locations
nose_data_df= df.iloc[::17,3]
rankle_data_df = df.iloc[16::17,3]
rknee_data_df = df.iloc[14::17,3]
rwrist_data_df = df.iloc[10::17,3]
reye_data_df = df.iloc[2::17,3]
data_sets = [nose_data_df, rankle_data_df, rknee_data_df, rwrist_data_df, reye_data_df]
kps = ["Nose","Right Ankle","Right Knee","Right Wrist", "Right Eye"]
locations = [] #Initializing a list in order to append df at end of loop
#Create loop to perform this data manipulation for each data set:
for i in range(len(data_sets)):
#Remove brackets and spaces before string and after numbers.
data_select = data_sets[i]
part_edit_df = data_select.map(lambda x: x.lstrip('[ ').rstrip(' ]'))
#Replace spaces between numbers with a comma
part_edit2_df = part_edit_df.map(lambda x: x.replace(" ",",").replace(" ",",").replace(" ",","))
#splits string into muliple columns
part_split_df = part_edit2_df.str.split(",",expand=True)
#convert keypoint coordinates to integer values
part_data_ints_col0_df = | pd.to_numeric(part_split_df[0]) | pandas.to_numeric |
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import DatetimeIndex, MultiIndex
from pandas._libs import hashtable
from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), | u('z') | pandas.compat.u |
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
from streamlit.elements import altair
from credit_analysis.toolkit import (
anova_func,
ks_func,
roc_func,
odds_func,
score_shift_func,
acct_table,
swapset,
)
LOSS_PER_BAD_ACCT = 2000
REV_PER_GOOD_ACCT = 100
@st.cache
def load_predictions():
return pd.read_csv("output/preds.csv")
def cdf_charts(good_prob, bad_prob):
"""Plot CDF chart."""
source1 = pd.DataFrame({
"Good loans": np.arange(len(good_prob)) / len(good_prob),
}, index=good_prob)
c1 = altair.generate_chart("line", source1)
source2 = pd.DataFrame({
"Bad loans": np.arange(len(bad_prob)) / len(bad_prob),
}, index=bad_prob)
c2 = altair.generate_chart("line", source2)
return c1 + c2
def roc_chart(fpr, tpr):
"""Plot ROC curve."""
source = pd.DataFrame({"FPR": fpr, "TPR": tpr, "random": fpr})
base = alt.Chart(source).encode(
alt.X('FPR:Q'),
)
line1 = base.mark_line(color="black", strokeDash=[1, 1]).encode(
alt.Y('random:Q'),
)
line2 = base.mark_line().encode(
alt.Y('TPR:Q', title="TPR"),
)
return line1 + line2
def heatmap_chart(df, title=""):
"""Plot custom confusion matrix chart."""
source = df.copy()
source = source.reset_index()
source = pd.melt(source, id_vars="index", value_vars=df.columns)
source.columns = ["m1", "m2", "value"]
base = alt.Chart(source).encode(
alt.X('m1:O', title="New Model"),
alt.Y("m2:O", title="Baseline Model"),
).properties(
width=500,
height=400,
title=title,
)
rects = base.mark_rect().encode(
color='value:Q',
)
text = base.mark_text(
align='center',
baseline='middle',
color='black',
size=12,
dx=0,
).encode(
text='value:Q',
)
return rects + text
def analyse_model():
"""Credit risk analysis: model."""
preds = load_predictions()
y_true = preds["y_valid"].values
y_prob = preds["y_prob"].values
st.subheader('ANOVA')
accept_threshold = st.slider(
"Probability cutoff for approval", min_value=0.0, max_value=1.0, value=0.05, step=0.01)
bad_mean, good_mean, anova = anova_func(y_true, y_prob, accept_threshold)
st.write(f'Mean default rate in group predicted to be bad = `{bad_mean:.4f}`')
st.write(f'Mean default rate in group predicted to be good = `{good_mean:.4f}`')
st.write('ANOVA statistic for difference in default rate between predicted bad and '
f'predicted good = `{anova:.4f}`')
st.subheader('KS Statistic')
bad_prob, good_prob, ks = ks_func(y_true, y_prob)
# st.write(f"KS statistic = `{ks[0]:.4f}`")
# st.write(f"p-value = `{ks[1]}`")
st.altair_chart(
cdf_charts(bad_prob, good_prob).properties(title=f"CDFs: KS statistic = {ks[0]:.4f}"),
use_container_width=True,
)
st.subheader('ROC Curve and Gini Coefficient')
# st.write(f"ROC AUC = `{lrAUC:.4f}`")
roc_auc, gini, fpr, tpr, _ = roc_func(y_true, y_prob)
st.altair_chart(
roc_chart(fpr, tpr).properties(title=f"ROC AUC = {roc_auc:.4f}, Gini = {gini:.4f}"),
use_container_width=True,
)
st.subheader('Odds ratio')
num_bins = 10
tranche_table = odds_func(y_true, y_prob, num_bins)
st.bar_chart(tranche_table[["Pred Proba of Default", "Odds (good to bad)"]].set_index(
"Pred Proba of Default"))
# st.subheader('Table of performance per tranche')
st.write(tranche_table)
def odds_chart(y_true, y_prob, y_baseline, num_bins):
mod_tranche = odds_func(y_true, y_prob, num_bins)
df1 = mod_tranche[["Pred Proba of Default", "Odds (good to bad)"]].copy()
df1["Model"] = "New Model"
bl_tranche = odds_func(y_true, y_baseline, num_bins)
df2 = bl_tranche[["Pred Proba of Default", "Odds (good to bad)"]].copy()
df2["Model"] = "Baseline Model"
source = | pd.concat([df1, df2], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 10:57:15 2019
@author: BaolongSu
TAG/Plot function
LipidAnaGUI2.0 Tab2
or
LA_V1 Tab5
09/13/2019:
classtotal plot font size changed;
classtotal plot title removed "class="
03/10/2020
classtotal plot ci='sd'
11/23/2020
all plot sort by groupname
"""
import numpy as np
import pandas as pd
# from pyopenms import *
import os
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter.messagebox import showinfo
from tkinter import *
import matplotlib.pyplot as plt
import matplotlib
from tkinter import filedialog
import glob
import re
# import statistics
import datetime
from matplotlib.pyplot import cm
import seaborn as sns
def imp_exp(exploc):
exploc.configure(state="normal")
exploc.delete(1.0, END)
exp11 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
exploc.insert(INSERT, exp11)
exploc.configure(state="disabled")
def tagplot(exploc, CheckVar1, CheckVar2):
matplotlib.use('Agg')
start = datetime.datetime.now()
file = exploc.get('1.0', 'end-1c')
# root.destroy()
# set directory
os.chdir(file[0:file.rfind('/')])
# Open Excel Document
lipid_all = pd.read_excel(file, sheet_name='Species Norm', index_col=0, na_values='.')
lipid_all = lipid_all.drop(['SampleNorm', 'NormType'], axis=1)
# Convert to floats
# lipid_all=lipid_all.replace(".",np.nan)
# lipid_all.astype(float)
# Transpose Document
species_all = lipid_all.T
# Create TAG DataFrame
tag_all = species_all[species_all.index.str.startswith(('TAG'))].copy()
# if no TAG, skip tag analysis
if len(tag_all) != 0:
tag_all = tag_all.astype(float)
#####################################
# Create Index for TAG carbon length#
#####################################
# tag_all.columns = tag_all.loc['GroupName']
tag_all['carbon'] = tag_all.index.str.slice(start=3, stop=5)
tag_all.carbon = tag_all.carbon.astype(float)
# Create a Series of all unique TAG carbon length
# car_len = tag_all.carbon.values
# ucar_len = np.unique(car_len)
# Create DataFrame for each Carbon Length
tag_36 = tag_all[(tag_all['carbon'] == 36)].copy()
tag_37 = tag_all[(tag_all['carbon'] == 37)].copy()
tag_38 = tag_all[(tag_all['carbon'] == 38)].copy()
tag_39 = tag_all[(tag_all['carbon'] == 39)].copy()
tag_40 = tag_all[(tag_all['carbon'] == 40)].copy()
tag_41 = tag_all[(tag_all['carbon'] == 41)].copy()
tag_42 = tag_all[(tag_all['carbon'] == 42)].copy()
tag_43 = tag_all[(tag_all['carbon'] == 43)].copy()
tag_44 = tag_all[(tag_all['carbon'] == 44)].copy()
tag_45 = tag_all[(tag_all['carbon'] == 45)].copy()
tag_46 = tag_all[(tag_all['carbon'] == 46)].copy()
tag_47 = tag_all[(tag_all['carbon'] == 47)].copy()
tag_48 = tag_all[(tag_all['carbon'] == 48)].copy()
tag_49 = tag_all[(tag_all['carbon'] == 49)].copy()
tag_50 = tag_all[(tag_all['carbon'] == 50)].copy()
tag_51 = tag_all[(tag_all['carbon'] == 51)].copy()
tag_52 = tag_all[(tag_all['carbon'] == 52)].copy()
tag_53 = tag_all[(tag_all['carbon'] == 53)].copy()
tag_54 = tag_all[(tag_all['carbon'] == 54)].copy()
tag_55 = tag_all[(tag_all['carbon'] == 55)].copy()
tag_56 = tag_all[(tag_all['carbon'] == 56)].copy()
tag_57 = tag_all[(tag_all['carbon'] == 57)].copy()
tag_58 = tag_all[(tag_all['carbon'] == 58)].copy()
tag_59 = tag_all[(tag_all['carbon'] == 59)].copy()
tag_60 = tag_all[(tag_all['carbon'] == 60)].copy()
# Create Summary Dataframe
tag_sum = pd.DataFrame(columns=tag_all.columns)
tag_sum.loc['36'] = pd.Series(tag_36.sum(min_count=1))
tag_sum.loc['37'] = pd.Series(tag_37.sum(min_count=1))
tag_sum.loc['38'] = pd.Series(tag_38.sum(min_count=1))
tag_sum.loc['39'] = pd.Series(tag_39.sum(min_count=1))
tag_sum.loc['40'] = pd.Series(tag_40.sum(min_count=1))
tag_sum.loc['41'] = pd.Series(tag_41.sum(min_count=1))
tag_sum.loc['42'] = pd.Series(tag_42.sum(min_count=1))
tag_sum.loc['43'] = pd.Series(tag_43.sum(min_count=1))
tag_sum.loc['44'] = pd.Series(tag_44.sum(min_count=1))
tag_sum.loc['45'] = pd.Series(tag_45.sum(min_count=1))
tag_sum.loc['46'] = pd.Series(tag_46.sum(min_count=1))
tag_sum.loc['47'] = pd.Series(tag_47.sum(min_count=1))
tag_sum.loc['48'] = pd.Series(tag_48.sum(min_count=1))
tag_sum.loc['49'] = pd.Series(tag_49.sum(min_count=1))
tag_sum.loc['50'] = pd.Series(tag_50.sum(min_count=1))
tag_sum.loc['51'] = pd.Series(tag_51.sum(min_count=1))
tag_sum.loc['52'] = pd.Series(tag_52.sum(min_count=1))
tag_sum.loc['53'] = pd.Series(tag_53.sum(min_count=1))
tag_sum.loc['54'] = pd.Series(tag_54.sum(min_count=1))
tag_sum.loc['55'] = pd.Series(tag_55.sum(min_count=1))
tag_sum.loc['56'] = pd.Series(tag_56.sum(min_count=1))
tag_sum.loc['57'] = pd.Series(tag_57.sum(min_count=1))
tag_sum.loc['58'] = pd.Series(tag_58.sum(min_count=1))
tag_sum.loc['59'] = pd.Series(tag_59.sum(min_count=1))
tag_sum.loc['60'] = pd.Series(tag_60.sum(min_count=1))
tag_sum = tag_sum.drop(['carbon'], axis=1)
tag_summary = tag_sum.T
tag_summary.insert(loc=0, column='GroupNum', value=np.array(species_all.loc['GroupNum']).astype(float))
tag_summary.insert(loc=0, column='GroupName', value=np.array(species_all.loc['GroupName']))
tag_summary.insert(loc=0, column='SampleID', value=np.array(species_all.loc['SampleID']))
##devide by 3
tag_summary.iloc[:, 3:] = tag_summary.iloc[:, 3:] / 3
# Sum Species
tag_36.loc['36total'] = pd.Series(tag_36.sum(min_count=1)) # minimum 1 number, if all NA, return NA
tag_37.loc['37total'] = pd.Series(tag_37.sum(min_count=1))
tag_38.loc['38total'] = pd.Series(tag_38.sum(min_count=1))
tag_39.loc['39total'] = pd.Series(tag_39.sum(min_count=1))
tag_40.loc['40total'] = pd.Series(tag_40.sum(min_count=1))
tag_41.loc['41total'] = pd.Series(tag_41.sum(min_count=1))
tag_42.loc['42total'] = pd.Series(tag_42.sum(min_count=1))
tag_43.loc['43total'] = pd.Series(tag_43.sum(min_count=1))
tag_44.loc['44total'] = pd.Series(tag_44.sum(min_count=1))
tag_45.loc['45total'] = pd.Series(tag_45.sum(min_count=1))
tag_46.loc['46total'] = pd.Series(tag_46.sum(min_count=1))
tag_47.loc['47total'] = pd.Series(tag_47.sum(min_count=1))
tag_48.loc['48total'] = pd.Series(tag_48.sum(min_count=1))
tag_49.loc['49total'] = pd.Series(tag_49.sum(min_count=1))
tag_50.loc['50total'] = pd.Series(tag_50.sum(min_count=1))
tag_51.loc['51total'] = pd.Series(tag_51.sum(min_count=1))
tag_52.loc['52total'] = pd.Series(tag_52.sum(min_count=1))
tag_53.loc['53total'] = pd.Series(tag_53.sum(min_count=1))
tag_54.loc['54total'] = pd.Series(tag_54.sum(min_count=1))
tag_55.loc['55total'] = pd.Series(tag_55.sum(min_count=1))
tag_56.loc['56total'] = pd.Series(tag_56.sum(min_count=1))
tag_57.loc['57total'] = pd.Series(tag_57.sum(min_count=1))
tag_58.loc['58total'] = pd.Series(tag_58.sum(min_count=1))
tag_59.loc['59total'] = pd.Series(tag_59.sum(min_count=1))
tag_60.loc['60total'] = pd.Series(tag_60.sum(min_count=1))
##summary avg
tag_all2 = lipid_all[lipid_all.columns[pd.Series(lipid_all.columns).str.startswith('TAG')]].copy()
tag_all2 = tag_all2.astype(float)
tag_all2 = pd.concat([tag_all2, lipid_all.iloc[:, 1:4]], axis=1)
tag_all2_drop = tag_all2.drop(['SampleID'], axis=1)
tag_sum_avg = tag_all2_drop.groupby(['GroupName'], as_index=True).mean()
# tag_summary_drop = tag_summary.drop(['SampleID'], axis=1)
# tag_sum_avg = tag_summary_drop.groupby(['GroupName'], as_index=True).mean()
tag_sum_avg = tag_sum_avg.sort_values(by=['GroupNum'])
##devide by 3
tag_sum_avg.iloc[:, 0:(len(tag_sum_avg.columns) - 1)] = tag_sum_avg.iloc[:,
0:(len(tag_sum_avg.columns) - 1)] / 3
tag_sum_avgB = tag_sum_avg.copy()
tag_sum_avg = tag_sum_avg.T
tag_sum_avg = tag_sum_avg.drop(['GroupNum'], axis=0)
tag_sum_avg['carbon'] = tag_sum_avg.index.str.slice(start=3, stop=5)
# tag_sum_avg.carbon = tag_sum_avg.carbon.astype(float)
tag_sum_avg = tag_sum_avg.groupby(['carbon'], as_index=True).sum().T
tag_sum_avg.insert(loc=0, column='GroupNum', value=np.array(tag_sum_avgB['GroupNum']).astype(float))
tag_sum_avg.columns = tag_sum_avg.columns
tag_sum_avgCC = tag_sum_avg.copy()
# tag_sum_avg['GroupNum'] = tag_sum_avgB['GroupNum']
tag_sum_avg = pd.DataFrame(index=tag_sum_avgCC.index, columns=np.arange(36, 61).astype(str))
tag_sum_avg.insert(loc=0, column='GroupNum', value=tag_sum_avgCC['GroupNum'])
for i in tag_sum_avgCC.columns:
tag_sum_avg[i] = tag_sum_avgCC[i]
tag_avg_di = tag_sum_avg['GroupNum'].to_dict()
tag_summary_drop = tag_summary.drop(['SampleID'], axis=1)
tag_sum_sd = tag_summary_drop.groupby(['GroupName'], as_index=True).std()
tag_sum_sd['GroupNum'] = tag_sum_sd.index
tag_sum_sd['GroupNum'] = tag_sum_sd['GroupNum'].replace(tag_avg_di)
tag_sum_sd = tag_sum_sd.sort_values(by=['GroupNum'])
# tag_sum_sd['ExpNum'] = tag_sum_avg['ExpNum']
# Write to Excel
writer = pd.ExcelWriter(file[file.rfind('/') + 1:file.rfind('.')] + '_' + 'TAG_carbon.xlsx')
tag_summary.to_excel(writer, 'Summary')
tag_sum_avg.to_excel(writer, sheet_name='SumAvg')
tag_sum_sd.to_excel(writer, sheet_name='SumAvg', startrow=tag_sum_sd.shape[0] + 5, startcol=0)
tag_36.to_excel(writer, 'TAG36')
tag_37.to_excel(writer, 'TAG37')
tag_38.to_excel(writer, 'TAG38')
tag_39.to_excel(writer, 'TAG39')
tag_40.to_excel(writer, 'TAG40')
tag_41.to_excel(writer, 'TAG41')
tag_42.to_excel(writer, 'TAG42')
tag_43.to_excel(writer, 'TAG43')
tag_44.to_excel(writer, 'TAG44')
tag_45.to_excel(writer, 'TAG45')
tag_46.to_excel(writer, 'TAG46')
tag_47.to_excel(writer, 'TAG47')
tag_48.to_excel(writer, 'TAG48')
tag_49.to_excel(writer, 'TAG49')
tag_50.to_excel(writer, 'TAG50')
tag_51.to_excel(writer, 'TAG51')
tag_52.to_excel(writer, 'TAG52')
tag_53.to_excel(writer, 'TAG53')
tag_54.to_excel(writer, 'TAG54')
tag_55.to_excel(writer, 'TAG55')
tag_56.to_excel(writer, 'TAG56')
tag_57.to_excel(writer, 'TAG57')
tag_58.to_excel(writer, 'TAG58')
tag_59.to_excel(writer, 'TAG59')
tag_60.to_excel(writer, 'TAG60')
writer.save()
print('carbon data saved')
#################
##plot avg & sd##
#################
### set plot size
# plt.figure(num=None, figsize=(36, 12), dpi=80, facecolor='w', edgecolor='k')
# melt data
tag_avg_plot = tag_sum_avg.copy()
tag_avg_plot['GroupName'] = tag_avg_plot.index
tag_avg_plot = tag_avg_plot.drop(['GroupNum'], axis=1)
# tag_avg_plot.dropna(axis=1, how='all', inplace=True)
tag_avg_plot2 = pd.melt(tag_avg_plot, id_vars=["GroupName"], var_name="Carbon", value_name="Avg")
tag_avg_sdplot = tag_sum_sd.copy()
tag_avg_sdplot['GroupName'] = tag_avg_sdplot.index
tag_avg_sdplot = tag_avg_sdplot.drop(['GroupNum'], axis=1)
# tag_avg_sdplot.dropna(axis=1, how='all', inplace=True)
tag_avg_sdplot2 = | pd.melt(tag_avg_sdplot, id_vars=["GroupName"], var_name="Carbon", value_name="SD") | pandas.melt |
# -*- coding: utf-8 -*-
#
# wxtruss
# License: MIT License
# Author: <NAME>
# E-mail: <EMAIL>
# ~ from __future__ import division
import wx
import wx.grid as grid
import wx.html as html
import numpy as np
import matplotlib
matplotlib.use('WXAgg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
import matplotlib.lines as mlines
import wxtruss.iconos as ic
from nusa import * # FEA library
import webbrowser
import pandas as pd
import json
import os
# For versioning
dir_setup = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_setup, '', 'version.py')) as f:
# Defines __version__
exec(f.read())
class wxTruss(wx.Frame):
def __init__(self,parent):
title = "wxTruss {version}".format(version=__version__)
wx.Frame.__init__(self,parent,title=title,size=(900,600))
self.init_menu()
self.init_ctrls()
self.init_model_data()
self.SetBackgroundColour("#FFFFFF")
self.SetIcon(ic.wxtruss.GetIcon())
self.Centre(True)
self.Show()
def init_ctrls(self):
self.mainsz = wx.BoxSizer(wx.HORIZONTAL)
self.upsz = wx.BoxSizer(wx.HORIZONTAL)
self.figsz = wx.BoxSizer(wx.VERTICAL)
self.toolbar = Toolbar(self)
self.toolbar.Realize()
self.upsz.Add(self.toolbar, 0, wx.ALIGN_LEFT)
# Creating figures, axes and canvas
self._set_mpl()
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.mpl_toolbar = NavigationToolbar(self.canvas)
self.mpl_toolbar.Realize()
self.figsz.Add(self.canvas, 12, wx.EXPAND|wx.ALL, 2)
self.figsz.Add(self.mpl_toolbar, 1, wx.EXPAND|wx.ALL, 20)
#~ self.figure.set_facecolor("w")
self.txtout = HTMLWindow(self)
#~ self.txtout.SetMinSize((200,-1))
#~ self.txtout.SetPage("<html></html>")
self.upsz.Add(self.figsz, 1, wx.EXPAND|wx.ALL, 2)
self.mainsz.Add(self.upsz, 5, wx.EXPAND)
self.mainsz.Add(self.txtout, 3, wx.EXPAND)
self.SetSizer(self.mainsz)
# toolbar events
self.Bind(wx.EVT_TOOL, self.add_nodes, self.toolbar.nodes_tool)
self.Bind(wx.EVT_TOOL, self.add_elements, self.toolbar.elements_tool)
self.Bind(wx.EVT_TOOL, self.add_constraints, self.toolbar.constraints_tool)
self.Bind(wx.EVT_TOOL, self.add_forces, self.toolbar.forces_tool)
self.Bind(wx.EVT_TOOL, self.plot_model, self.toolbar.plot_model_tool)
self.Bind(wx.EVT_TOOL, self.solve_model, self.toolbar.solve_tool)
self.Bind(wx.EVT_TOOL, self.plot_deformed_shape, self.toolbar.plot_deformed_shape_tool)
def _set_mpl(self):
matplotlib.rc('figure', facecolor="#ffffff")
matplotlib.rc('axes', facecolor="#ffffff", linewidth=0.1, grid=False)
# ~ matplotlib.rc('font', family="Times New Roman")
def init_menu(self):
m_file = wx.Menu()
new_model = m_file.Append(-1, "New model \tCtrl+N")
from_json = m_file.Append(-1, "Read model from Truss/JSON file... \tCtrl+J")
quit_app = m_file.Append(-1, "Quit \tCtrl+Q")
m_help = wx.Menu()
_help = m_help.Append(-1, "Help")
about = m_help.Append(-1, "About...")
menu_bar = wx.MenuBar()
menu_bar.Append(m_file, "File")
menu_bar.Append(m_help, "Help")
self.SetMenuBar(menu_bar)
self.Bind(wx.EVT_MENU, self.on_new_model, new_model)
self.Bind(wx.EVT_MENU, self.on_from_json, from_json)
self.Bind(wx.EVT_MENU, self.on_about, about)
self.Bind(wx.EVT_MENU, self.on_help, _help)
self.Bind(wx.EVT_MENU, self.on_quit, quit_app)
def init_model_data(self):
try:
self.read_model_from_json("data/exampsle_01.truss")
except:
# self.nodes = np.array([[0,0],[2,0],[0,2]])
# self.elements = np.array([[1,2,200e9,1e-4],[2,3,200e9,1e-4],[1,3,200e9,1e-4]])
# self.forces = np.array([[3,1000,0]])
# self.constraints = np.array([[1, 0, 0], [2, 0, 0]])
self.nodes = []
self.elements = []
self.forces = []
self.constraints = []
def isempty(self,arg):
if not arg:
return True
return False
def on_about(self,event):
AboutDialog(None)
def on_help(self,event):
print("Help unavailable")
def on_quit(self,event):
self.Close()
def on_new_model(self,event):
self.nodes = []
self.elements = []
self.forces = []
self.constraints = []
self.axes.cla()
self.txtout.SetPage("")
self.canvas.draw()
def on_from_json(self,event):
path = ""
wildcard = "Truss file (*.truss)|*.truss| JSON file (*.json)|*.json"
dlg = wx.FileDialog(self, message="Select a Truss/JSON file...",
defaultDir=os.getcwd(), wildcard=wildcard, style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
dlg.Destroy()
if not path:
wx.MessageBox('Not file selected', 'wxTruss', wx.OK | wx.ICON_INFORMATION)
else:
self.read_model_from_json(path)
def build_model(self):
nc = self.nodes
ec = self.elements
x,y = nc[:,0], nc[:,1]
nodos = []
elementos = []
for k,nd in enumerate(nc):
cn = Node((x[k],y[k]))
nodos.append(cn)
for k,elm in enumerate(ec):
i,j,E,A = int(elm[0]-1),int(elm[1]-1),elm[2],elm[3]
ni,nj = nodos[i],nodos[j]
ce = Truss((ni,nj), E, A)
elementos.append(ce)
self.model = TrussModel("Truss Model")
for n in nodos: self.model.add_node(n)
for e in elementos: self.model.add_element(e)
for c in self.constraints:
k,ux,uy = int(c[0]),c[1],c[2]
if ~np.isnan(ux) and ~np.isnan(uy):
self.model.add_constraint(nodos[k-1], ux=ux, uy=uy)
elif ~np.isnan(ux):
self.model.add_constraint(nodos[k-1], ux=ux)
elif ~np.isnan(uy):
self.model.add_constraint(nodos[k-1], uy=uy)
for f in self.forces:
k,fx,fy = int(f[0]),f[1],f[2]
self.model.add_force(nodos[k-1],(fx,fy))
def solve_model(self,event):
self.build_model()
self.model.solve()
self.html_report()
def html_report(self):
m = self.model
NODES = [n.label+1 for n in m.get_nodes()]
ELEMENTS = [e.label+1 for e in m.get_elements()]
el = [e.get_nodes() for e in m.get_elements()]
ELEMENTS_CONN = [(ni.label+1,nj.label+1) for ni,nj in el]
NODAL_COORDS = [[n.x,n.y] for n in m.get_nodes()]
NODAL_DISPLACEMENTS = [[n.ux,n.uy] for n in m.get_nodes()]
NODAL_FORCES = [[n.fx,n.fy] for n in m.get_nodes()]
ELEMENT_FORCES = [e.f for e in m.get_elements()]
ELEMENT_STRESSES = [e.s for e in m.get_elements()]
EL_INFO = pd.DataFrame(ELEMENTS_CONN, columns=["Ni","Nj"], index=ELEMENTS).to_html()
ND_COORDS = pd.DataFrame(NODAL_COORDS, columns=["X","Y"], index=NODES).to_html()
ND = pd.DataFrame(NODAL_DISPLACEMENTS, columns=["UX","UY"], index=NODES).to_html()
NF = | pd.DataFrame(NODAL_FORCES, columns=["FX","FY"], index=NODES) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from src.PolutantsTable import PolutantsTable as pt
class URBAGgraphs:
df = pd.DataFrame()
obs_color='blue'
pre_color='tab:orange'
dpi = 100
def load_file(self, file):
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
self.df = pd.read_csv(file, parse_dates=['DATA'], date_parser=dateparse)
def filter_by_time(self, start_date, end_date):
'''
filter df by a range of dates in YYYY-MM-DD format
'''
result = self.df[(self.df['DATA'] >= start_date) & (self.df['DATA'] <= end_date)]
return result
def dailyProfile(self, polutant, estacions, model):
dailyProfileMean = self.df[self.df['NOM ESTACIO'].isin(estacions)].groupby(by=['HOUR']).mean()
dailyProfileStd = self.df[self.df['NOM ESTACIO'].isin(estacions)].groupby(by=['HOUR']).std()
quantile05 = self.df[self.df['NOM ESTACIO'].isin(estacions)].groupby(by=['HOUR']).quantile(0.05)
quantile95 = self.df[self.df['NOM ESTACIO'].isin(estacions)].groupby(by=['HOUR']).quantile(0.95)
return dailyProfileMean, dailyProfileStd, quantile05, quantile95
def plot_Daily_Profile_Error_Bars(self, polutant, estacions, model, filename='./output/DailyProfileErrBars', error = '90percentil', description = False):
'''
createds a daily profile plot with the loaded file in the self.df
indicate pol
error = std / 90percentil
'''
dp, dpd, q5, q95 = self.dailyProfile(polutant, estacions, model)
hours = dp.index
obs = dp['OBSERVATION']
pre = dp[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
obs_std = dp['OBSERVATION']
pre_std = dpd[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
obs_q5 = q5['OBSERVATION']
obs_q95 = q95['OBSERVATION']
pre_q5 = q5[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
pre_q95 = q95[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
fig, ax1 = plt.subplots(nrows=1, sharex=True)
if(error == 'std'):
ax1.errorbar(hours, obs, yerr=obs_std, color=self.obs_color, label="observation", fmt='o')
ax1.errorbar(hours, pre, yerr=pre_std, color=self.pre_color, label='prediction', fmt='o')
error_text = 'standard deviation'
elif(error == '90percentil'):
ax1.errorbar(hours, obs, yerr=np.vstack([obs_q5, obs_q95]), color=self.obs_color, label="observation", fmt='o')
ax1.errorbar(hours, pre, yerr=np.vstack([pre_q5, pre_q95]), color=self.pre_color, label='prediction', fmt='o')
error_text = 'percentil 90'
if description == True:
start_date = self.df['DATA'].min()
end_date = self.df['DATA'].max()
line1 = 'Daily profile from {start} to {end}.\n'.format(start=start_date, end=end_date)
line2 = 'The error is given by {error}'.format(error = error_text)
full_text = line1 + line2
ax1.text(0,-6,full_text)
ax1.set_xlabel('hours')
ax1.set_ylabel('concentration {}'.format(pt.AllPolutants[polutant]['obs_units']) )
ax1.set_title(str(estacions)+' '+polutant)
ax1.legend()
plt.savefig(filename, dpi=self.dpi, bbox_inches='tight')
plt.show()
return dp, dpd, q5, q95
def plot_Daily_Profile_Areas(self, polutant, estacions, model, filename='./output/DailyProfileArea', error = '90percentil', description = False):
'''
createds a daily profile plot with the loaded file in the self.df
indicate pol
error = std / 90percentil
'''
dp, dpd, q5, q95 = self.dailyProfile(polutant, estacions, model)
hours = dp.index
obs = dp['OBSERVATION']
pre = dp[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
obs_std = dp['OBSERVATION']
pre_std = dpd[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
obs_q5 = q5['OBSERVATION']
obs_q95 = q95['OBSERVATION']
pre_q5 = q5[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
pre_q95 = q95[model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']]
fig, ax1 = plt.subplots(nrows=1, sharex=True)
ax1.plot(hours, obs, color='b', label='observation')
ax1.plot(hours, pre, color='tab:orange', label='prediction')
if(error == 'std'):
ax1.fill_between(hours, obs+obs_std, obs-obs_std, facecolor='blue', alpha=0.2)
ax1.fill_between(hours, pre+pre_std, pre-pre_std, facecolors='tab:orange', alpha=0.2)
error_text = 'standard deviation'
elif(error == '90percentil'):
ax1.fill_between(hours, obs+obs_q95, obs-obs_q5, facecolor='blue', alpha=0.2)
ax1.fill_between(hours, pre+pre_q95, pre-pre_q5, facecolors='tab:orange', alpha=0.2)
error_text = 'percentil 90'
if description == True:
start_date = self.df['DATA'].min()
end_date = self.df['DATA'].max()
line1 = 'Daily profile {start} / {end}.\n'.format(start=start_date, end=end_date)
line2 = 'The error is given by {error}'.format(error = error_text)
full_text = line1 + line2
ax1.text(1,45,full_text)
ax1.set_xlabel('hours')
ax1.set_ylabel('concentration {}'.format(pt.AllPolutants[polutant]['obs_units']) )
ax1.set_title(str(estacions)+' '+polutant)
ax1.legend()
plt.savefig(filename, dpi=self.dpi, bbox_inches='tight')
plt.show()
return dp, dpd, q5, q95
def listEstacions(self):
result = self.df.groupby(by=['NOM ESTACIO','MUNICIPI', 'LATITUD', 'LONGITUD', 'ALTITUD', 'AREA URBANA']).count()[['CODI EOI']]
return result
def timeSeries(self, polutant, estacio, model):
timeSerieEstacio = self.df[self.df['NOM ESTACIO'] == estacio].sort_values(by=['DATA'])
return timeSerieEstacio
def plot_Time_Series_v1(self, polutant, estacio, model, filename='./output/Time_Series_1'):
df = self.timeSeries(polutant, estacio, model)
df['DATETIME'] = df.apply(lambda r:
str(r['YEAR'])+'-'+str(r['MONTH']).zfill(2)+'-'+str(r['DAY']).zfill(2)+' '+str(r['HOUR']).zfill(2)+':00:00',axis=1)
df['DATETIME'] = pd.to_datetime(df['DATETIME'], infer_datetime_format=False)
plt.style.use('ggplot')
df.plot(x='DATETIME',
y=['OBSERVATION', model+'_'+polutant+'_'+pt.AllPolutants[polutant]['obs_units']],
figsize=(12,4))
plt.title(polutant+' '+estacio)
plt.ylabel('concentration {}'.format(pt.AllPolutants[polutant]['obs_units']) )
plt.savefig(filename, dpi=self.dpi)
plt.show()
return df
def plot_Time_Series_v2(self, polutant, estacio, model, filename='./output/Time_Series_2'):
sns.set()
df = self.timeSeries(polutant, estacio, model)
df['DATETIME'] = df.apply(lambda r:
str(r['YEAR'])+'-'+str(r['MONTH']).zfill(2)+'-'+str(r['DAY']).zfill(2)+' '+str(r['HOUR']).zfill(2)+':00:00',axis=1)
df['DATETIME'] = | pd.to_datetime(df['DATETIME'], infer_datetime_format=False) | pandas.to_datetime |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
df = df_orig.copy()
df2 = df
df["A"] += 1.5
expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
@pytest.mark.parametrize(
"op",
[
"add",
"and",
"div",
"floordiv",
"mod",
"mul",
"or",
"pow",
"sub",
"truediv",
"xor",
],
)
def test_inplace_ops_identity2(self, op):
if op == "div":
return
df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
operand = 2
if op in ("and", "or", "xor"):
# cannot use floats for boolean ops
df["a"] = [True, False, True]
df_copy = df.copy()
iop = f"__i{op}__"
op = f"__{op}__"
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
tm.assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
def test_alignment_non_pandas(self):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [
[1, 2, 3],
(1, 2, 3),
np.array([1, 2, 3], dtype=np.int64),
range(1, 4),
]:
expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)
tm.assert_frame_equal(align(df, val, "index")[1], expected)
expected = DataFrame(
{"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index
)
tm.assert_frame_equal(align(df, val, "columns")[1], expected)
# length mismatch
msg = "Unable to coerce to Series, length must be 3: given 2"
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(
align(df, val, "index")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
tm.assert_frame_equal(
align(df, val, "columns")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
# shape mismatch
msg = "Unable to coerce to DataFrame, shape must be"
val = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.zeros((3, 3, 3))
msg = re.escape(
"Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
)
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
def test_no_warning(self, all_arithmetic_operators):
df = | pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) | pandas.DataFrame |
from pysam import VariantFile
import sys
import pandas as pd
def parse_vcf(input_vcf):
# Opening the input file
vcf_in = VariantFile(input_vcf, 'r') # auto-detect input format
# making the different arrays
contig_list = [i.contig for i in vcf_in.fetch()]
sv_type_list = [i.info['SVTYPE'] for i in vcf_in.fetch()]
sv_reference = [i.ref for i in vcf_in.fetch()]
sv_alternative = [i.alts[0] for i in vcf_in.fetch()]
sv_position = [i.pos for i in vcf_in.fetch()]
sv_stop_pos = [i.stop for i in vcf_in.fetch()]
# putting all of the relevant vcf information into a datastructure
import pandas as pd
# intialize data of lists.
vcf_data = {'Contig Name':contig_list, 'SV type':sv_type_list, 'REF SV':sv_reference, 'ALT SV':sv_alternative, 'SV POS':sv_position, 'SV STOP':sv_stop_pos}
pandas_dataframe = | pd.DataFrame(vcf_data) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
import re
import string
@pd.api.extensions.register_dataframe_accessor('zookeeper')
class ZooKeeper:
def __init__(self, pandas_obj):
# validate and assign object
self._validate(pandas_obj)
self._obj = pandas_obj
# define incorporated modules - columns consisting of others will not have the dtype changed
self._INCORPORATED_MODULES = ['builtins', 'numpy', 'pandas']
# define a possible list of null values
self._NULL_VALS = [None, np.nan, 'np.nan', 'nan', np.inf, 'np.inf', 'inf', -np.inf, '-np.inf', '', 'n/a', 'na',
'N/A', 'NA', 'unknown', 'unk', 'UNKNOWN', 'UNK']
# assign dtypes and limits
# boolean
BOOL_STRINGS_TRUE = ['t', 'true', 'yes', 'on']
BOOL_STRINGS_FALSE = ['f', 'false', 'no', 'off']
self._BOOL_MAP_DICT = {i: True for i in BOOL_STRINGS_TRUE}.update({i: False for i in BOOL_STRINGS_FALSE})
self._DTYPE_BOOL_BASE = np.bool
self._DTYPE_BOOL_NULLABLE = pd.BooleanDtype()
# unsigned integers - base and nullable
self._DTYPES_UINT_BASE = [np.uint8, np.uint16, np.uint32, np.uint64]
self._DTYPES_UINT_NULLABLE = [pd.UInt8Dtype(), pd.UInt16Dtype(), | pd.UInt32Dtype() | pandas.UInt32Dtype |
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
import pandas as pd
from sklearn.datasets import load_iris
data = load_iris()
X_np = data.data
y_np = data.target
iris_X_train_columns = ["x1", "x2", "x3", "x4"]
X_pd = | pd.DataFrame(X_np, columns=iris_X_train_columns) | pandas.DataFrame |
"""
Fake data generator.
"""
import datetime
import os
from typing import Dict
import collections
import numpy as np
import pandas as pd
# Generic type definitions.
ndist_params = collections.namedtuple('ndist_params', ('mu', 'sigma', 'derives_from', 'decimals'))
#
# Generator settings
#
# Base paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Input files.
AD_GROUP_NAMES_FILE = os.path.join(DATA_DIR, 'gen_ad_groups.csv')
WEEKLY_PERF_FILE = os.path.join(DATA_DIR, 'gen_weekly_perf.csv')
WEEKDAY_PERF_FILE = os.path.join(DATA_DIR, 'gen_weekday_perf.csv')
# Settings for the random generator.
METRICS_RAND_SETTINGS: Dict[str, ndist_params] = {
'Impressions': ndist_params(mu=200, sigma=40, derives_from=None, decimals=0),
'Clicks': ndist_params(mu=0.1, sigma=0.01, derives_from='Impressions', decimals=0),
'Cost': ndist_params(mu=5, sigma=1, derives_from='Clicks', decimals=2),
'Conversions': ndist_params(mu=0.1, sigma=0.02, derives_from='Clicks', decimals=0),
'ConversionsValue': ndist_params(mu=1500, sigma=500, derives_from='Conversions', decimals=2),
}
HIGH_QUALITY_SCORE_SETTINGS = ndist_params(mu=7, sigma=2, derives_from=None, decimals=0)
LOW_QUALITY_SCORE_SETTINGS = ndist_params(mu=4, sigma=2, derives_from=None, decimals=0)
KEYWORD_IMPRESSIONS_SETTINGS = ndist_params(mu=500, sigma=300, derives_from=None, decimals=0)
# Simulated days without credit.
DAYS_WITHOUT_CREDIT = {
datetime.datetime(2018, 3, 17),
datetime.datetime(2018, 3, 18),
}
# Output files.
AD_GROUP_DATA_FILE = os.path.join(DATA_DIR, 'data_ad_group_performance.xlsx')
QUALITY_SCORE_DATA_FILE = os.path.join(DATA_DIR, 'data_keywords_quality_score.xlsx')
def load_weekday_perf(filename) -> pd.DataFrame:
"""
Loads the data file with source week days.
:param filename: File path.
:return: Loaded DataFrame.
"""
return pd.read_csv(filename, header=0)
def load_weekly_perf(filename) -> pd.DataFrame:
"""
Loads the data file with source weekly performance.
:param filename: File path
:return: Loaded DataFrame.
"""
weekly_perf = pd.read_csv(filename, header=0)
weekly_perf['iso_week'] = pd.to_datetime(weekly_perf['iso_week'], format='%YW%W-%w')
return weekly_perf
def load_ad_groups(filename) -> pd.DataFrame:
"""
Loads the data file with ad groups.
:param filename: File path.
:return: Loaded DataFrame.
"""
return pd.read_csv(filename, header=0)
def generate_ad_group_performance(ad_groups: pd.DataFrame, weekly_perf: pd.DataFrame, weekday_perf: pd.DataFrame) \
-> pd.DataFrame:
"""
Generates a data set with ad group daily performance.
:param ad_groups: Ad groups.
:param weekly_perf: Performance for each week.
:param weekday_perf: Performance for each week day.
:return: Generated DataFrame.
"""
# Join the tables.
result: pd.DataFrame = pd.merge(ad_groups, weekly_perf, on='key', how='inner')
result: pd.DataFrame = | pd.merge(result, weekday_perf, on='key', how='inner') | pandas.merge |
#visualization_and_clustering.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
import sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import chi2
import seaborn as sns
from sklearn.decomposition import PCA, IncrementalPCA, NMF
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.manifold import TSNE
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
sns.set_style("white")
sns.set_palette("Set2")
plt.style.use('seaborn-white')
def visualization2d(X, y):
X = preprocessing.scale(X)
data_subset = X
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(data_subset)
sns.set_palette("Set2")
plt.figure(figsize=(16,10))
sns.scatterplot(x=tsne_results[:, 0], y=tsne_results[:, 1], hue=y
,cmap='Set2', legend='brief') # hue = y
plt.legend(title='Tsne', loc='upper left', labels=['ARGILE', 'LIMON', 'LOAM', 'SABLE'])
plt.title('Tsne Visualization in 2D')
plt.tight_layout()
plt.savefig('Tsne')
plt.close()
def visualization3d(X, y):
X = preprocessing.scale(X)
data_subset = X
tsne = TSNE(n_components=3, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(data_subset)
ax = plt.axes(projection='3d')
for color, i, target_name in zip(mpl.cm.Set2.colors[:4], ['ARGILE', 'LIMON', 'LOAM', 'SABLE'], ['ARGILE', 'LIMON', 'LOAM', 'SABLE']):
ax.scatter(tsne_results[np.where(y.to_numpy() == i), 0], tsne_results[np.where(y.to_numpy() == i), 1], tsne_results[np.where(y.to_numpy() == i), 2],
label=target_name, color=color)
plt.title('tsne visualization' + " of chemical dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.tight_layout()
plt.savefig('3d_tsne')
plt.close()
def pca(X):
pca = PCA(2)
pca.fit(X)
print('number of the components', pca.n_components_ )
print('explained variance', pca.explained_variance_)
return(pca.fit_transform(X))
def clustering_dbscan(X, labels_true):
X = StandardScaler().fit_transform(X)
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print('Estimated number of clusters: %d' % n_clusters_)
print('Estimated number of noise points: %d' % n_noise_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
# Plot result
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', #, markerfacecolor=tuple(col)
markeredgecolor='k' ,markersize=8. ) #markeredgecolor='k',
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', #, markerfacecolor=tuple(col),
markeredgecolor='k', markersize=2)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.xlabel('First Component')
plt.ylabel('Second Component')
plt.legend()
plt.savefig('clusteringdbscan')
plt.close()
def clustering_kmeans(X, labels_true):
X = StandardScaler().fit_transform(X)
# #############################################################################
# Compute DBSCAN
kmeans = KMeans(n_clusters=4, random_state=0).fit(X)
labels = kmeans.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print('Estimated number of clusters: %d' % n_clusters_)
print('Estimated number of noise points: %d' % n_noise_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
# Plot result
# Black removed and is used for noise instead.
#X = pca(X)
sns.set_palette('Set2')
sns.scatterplot(x=X[:, 0], y=X[:, 1],
hue=labels_true, style=labels, legend='brief')
plt.savefig('clustering_kmeans')
plt.close()
def feature_selection(X, y, data, number_features):
bestfeatures = SelectKBest(score_func=chi2, k=number_features)
fit = bestfeatures.fit(X,y)
dfscores = | pd.DataFrame(fit.scores_) | pandas.DataFrame |
from telegram.error import ChatMigrated, BadRequest, Unauthorized, TimedOut, NetworkError
from telegram.ext import Updater
from telegram import Poll, Bot, PollOption, User, TelegramError
import os
# import telepot
import random
import csv
import pandas as pd
from flask import Flask, request
from properties.p import Property
from datetime import datetime
from threading import Lock, Thread
from datetime import date
import time
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.message import EmailMessage
from telegram.utils.request import Request
number_warnning_user = {}
lock = Lock()
user_error_count = {}
user_real = {}
prop = Property()
user_examples = []
max_allowed_tweet = 500 # 500 tweets
max_allowed_time = 600
number_tweet_to_reward = 60 # how many tweets the user should annotate to get crads
controls_per_tweet = 6 # for every 5 tweet, we need one control question
bot_prop = prop.load_property_files('bot.properties')
tweet_id_time = {}
users = []
annotated_tweet_ids = []
annoated_tweet_user_ids = {}
if not os.path.exists('annotated_tweets.csv'):
columns = ['tweet_id', 'sentiment', 'tweet', 'username']
columns = ['tweet_id', 'sentiment', 'tweet', 'username']
df = pd.DataFrame(columns=columns)
df.to_csv('annotated_tweets.csv', index=False)
else:
data2 = | pd.read_csv('annotated_tweets.csv', encoding='utf8') | pandas.read_csv |
# This gets all the census data, can be filted by level and state.
# Should play with all the chunk sizes, to see how that affects speed. I'm leaving a message in censusreporter_api.py for now that will alert you if the size gets too big and it does a json_merge. json_merge is slow, we want to avoid those.
import pandas as pd
from censusreporter_api import *
import os
from io import BytesIO
import io
from zipfile import ZipFile
import requests
import datetime
import re
import argparse
from bs4 import BeautifulSoup
def getTractInfo(url, regex=''):
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a', href=re.compile(regex))]
BASE_URL = "http://www2.census.gov/geo/docs/maps-data/data/gazetteer/"
YEAR = datetime.datetime.now().year
GAZ_YEAR_URL = '{}{}_Gazetteer/'.format(BASE_URL, YEAR)
# For easier Windows compatibility
OUTPUT_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))),
'dimensionaldata'
)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
STATE_LIST = [ 'AL','AK','AZ','AR','CA','CO','CT','DE','DC','FL','GA','HI','ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY','PR']
STATE_CODES = {'AL': '01','AK': '02','AZ': '04','AR': '05','CA': '06','CO': '08','CT': '09','DE': '10','DC': '11','FL': '12','GA': '13','HI': '15','ID': '16','IL': '17','IN': '18','IA': '19','KS': '20','KY': '21','LA': '22','ME': '23','MD': '24','MA': '25','MI': '26','MN': '27','MS': '28','MO': '29','MT': '30','NE': '31','NV': '32','NH': '33','NJ': '34','NM': '35','NY': '36','NC': '37','ND': '38','OH': '39','OK': '40','OR':'41','PA': '42','RI': '44','SC': '45','SD': '46','TN': '47','TX': '48','UT': '49','VT': '50','VA': '51','WA': '53','WV': '54','WI': '55','WY': '56','PR':'72'}
STATE_ABBREVS = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
DATA_TABLES = ['B01001','B03002','B06008','B23001','B19001','B25009','B25077']
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--states", help="State Abbreviation List, space seperated ie NY AK", nargs="*")
parser.add_argument("-t", "--type", help="ALL|County|Upper|Lower|Congress|City|State|Tract space separated", nargs="*")
def get_combinedData(thePD=None, tables=None):
geoids = thePD.index.tolist()
try:
dFrame = get_dataframe(geoids=geoids, tables=tables)
except Exception as e: #This should never happen, it's handled in censusreporter_api but just in case...
handledError = "release doesn't include GeoID(s) "
errorMsg = str(e)
print(errorMsg)
if handledError in errorMsg:
pattern = re.compile("^\s+|\s*,\s*|\s+$")
geoList = pattern.split(errorMsg.partition(handledError)[2].replace(".", ""))
thePD = thePD[-thePD.index.isin(geoList)]
#If everything was not valid, then we'll just return nothing
if len(thePD) == 0:
return None
return get_combinedData(thePD, tables)
else:
raise
else:
return dFrame
return None
def get_zip(file_url):
url = requests.get(file_url)
zipfile = ZipFile(BytesIO(url.content), 'r')
zip_names = zipfile.namelist()
if len(zip_names) == 1:
file_name = zip_names.pop()
extracted_file = zipfile.open(file_name).read()
return extracted_file
# Util for cleaning up column names of extra whitespace
def strip_colnames(df):
all_cols = df.columns.values.tolist()
col_dict = {}
for col in all_cols:
col_dict[col] = col.strip()
return df.rename(columns=col_dict)
# Gets voter_file_id from different jurisdiction types
def parse_voter_file_id(row):
if str(row['GEOID']).endswith('ZZ'):
return None
# If not ZZ, return letter for district (Alaska has lettered districts)
if not str(row['GEOID'])[-1:].isdigit():
return str(row['GEOID'])[-1:]
# Multiplier is 100 for congress, 1000 for all other types
if row['ENTITYTYPE'] == 'congress':
state_mult = 100
else:
state_mult = 1000
voter_file_id = int(row['GEOID']) - (int(row['STATEFP']) * state_mult)
# Some states with 1 district return 0, return 1 for those
if voter_file_id > 0:
return str(voter_file_id)
else:
return '1'
def get_census_data(geo_type, geo_url, state_list, fips_func,
state_idx=(0, 0),
census_tables=DATA_TABLES,
find_zz=False,
delim='\t',
chunk_size=250):
print("Working " + geo_type)
if requests.get(geo_url).status_code != 200:
raise ValueError("{} file not found at URL: {}".format(geo_type, geo_url))
# Changing source if city URL
if geo_type != 'City' and geo_type != "Tract":
csv_file = get_zip(geo_url)
file_source = io.StringIO(csv_file.decode('cp1252'))
else:
file_source = geo_url
reader = pd.read_csv(file_source,
delimiter=delim,
iterator=True,
chunksize=chunk_size)
context_df_list = []
census_df_list = []
for chunk in reader:
if geo_type == "Tract":
chunk.rename(columns={'CODE': 'GEOID'}, inplace=True)
chunk['USPS'] = state_list[0] #Tracts are passed in one state at a time, but don't have this field
else:
chunk = chunk.loc[chunk['USPS'].isin(state_list)]
if find_zz:
chunk['GEOID'] = chunk['GEOID'].astype(str)
chunk = chunk.loc[chunk['GEOID'].str.find('ZZ') == -1]
if len(chunk) > 0:
chunk['FIPS'] = chunk['GEOID'].apply(fips_func)
context_df_list.append(chunk)
chunk = chunk.set_index('FIPS')
data = get_combinedData(chunk, tables=census_tables)
census_df_list.append(data)
context_df = pd.concat(context_df_list)
census_df = pd.concat(census_df_list)
context_df['STATEFP'] = context_df['GEOID'].apply(
lambda x: str(x)[:state_idx[0]].zfill(state_idx[1])
)
context_df['ENTITYTYPE'] = geo_type.lower()
# Check if no census data returned, then just return context info
if len(census_df.columns.values.tolist()) == 0:
return strip_colnames(context_df.set_index('FIPS'))
census_df = census_df.rename(columns={'GEOID': 'FIPS'})
census_df = strip_colnames(census_df.set_index('FIPS'))
context_df = strip_colnames(context_df.set_index('FIPS'))
data = context_df.join(census_df)
return data
# State process is different enough to warrant its own function
def get_state(state_list, state_codes, census_tables=DATA_TABLES):
print("Starting State")
df = pd.DataFrame()
cTemp = [] #I know there is a better way, but this works for me
for state in state_list:
cTemp.append([state, state_codes[state]])
c = pd.DataFrame(cTemp, columns=['USPS', 'GEOID'])
c['FIPS'] = c['GEOID'].apply(lambda x: "04000US" + str(x).zfill(2))
c = strip_colnames(c.set_index('FIPS'))
data = get_combinedData(c, tables=census_tables)
print("data Size: " + str(len(data)))
df = df.append(data)
c['STATEFP'] = state_codes[state]
c['ENTITYTYPE'] = "state"
df = df.rename(columns={'GEOID': 'FIPS'})
df = strip_colnames(df.set_index('FIPS'))
data = c.join(df)
return data
if __name__ == '__main__':
args = parser.parse_args()
print("Writing to "+OUTPUT_DIR)
if args.states is None:
state_list = STATE_LIST
else:
state_list = [element.upper() for element in args.states]
if args.type is None:
types = 'ALL'
else:
types = [element.upper() for element in args.type]
for state in state_list:
if state not in STATE_CODES:
raise ValueError("Unknown state: " + state)
# Verify Gazetteer URL
while requests.get(GAZ_YEAR_URL).status_code != 200:
YEAR -= 1
GAZ_YEAR_URL = '{}{}_Gazetteer/'.format(BASE_URL, YEAR)
print(GAZ_YEAR_URL)
FILE_BASE_URL = GAZ_YEAR_URL + str(YEAR) + "_Gaz_"
output_df = pd.DataFrame()
if types == 'ALL' or "COUNTY" in types:
county_df = get_census_data(
'County',
FILE_BASE_URL + 'counties_national.zip',
state_list,
lambda x: "05000US{0:0=5d}".format(int(x)),
state_idx=(-3, 2)
)
county_df['VOTER_FILE_ID'] = county_df.apply(
parse_voter_file_id,
axis=1
)
output_df = output_df.append(county_df)
if types == 'ALL' or "CONGRESS" in types:
"""
Now we do congressional districts. These are numbered, so we need to guess
which one it is. We'll start with the year and subtract 1789 (first congress)
and divide by 2 (2 year terms), then we'll add 2 more since they don't really
end on the right year and we want to make sure we get it right. Then we'll
test the URL and keep removing 1 until we find it.
"""
congress = int((YEAR - 1789) / 2) + 2
conYearURL = FILE_BASE_URL + str(congress) + "CDs_national.zip"
while requests.get(conYearURL).status_code != 200:
if congress < 115: #Using 115 as when I wrote this code that was the current number, so I know that exists
raise ValueError("Crap, can't find congress file at: " + conYearURL)
congress -= 1
conYearURL = FILE_BASE_URL + str(congress) + "CDs_national.zip"
congress_df = get_census_data(
'Congress',
conYearURL,
state_list,
lambda x: "50000US" + str(x).zfill(4),
state_idx=(-2, 2)
)
congress_df['VOTER_FILE_ID'] = congress_df.apply(
parse_voter_file_id,
axis=1
)
congress_df['NAME'] = congress_df['VOTER_FILE_ID'].apply(
lambda x: 'Congressional District {}'.format(x) if x else None
)
output_df = pd.concat([output_df, congress_df])
if types == 'ALL' or "LOWER" in types:
state_house_df = get_census_data(
'Lower House',
FILE_BASE_URL + "sldl_national.zip",
state_list,
lambda x: "62000US" + str(x).zfill(5),
state_idx=(-3, 2),
find_zz=True
)
state_house_df['VOTER_FILE_ID'] = state_house_df.apply(
parse_voter_file_id,
axis=1
)
output_df = pd.concat([output_df, state_house_df])
if types == 'ALL' or "UPPER" in types:
upper_house_df = get_census_data(
'Upper House',
FILE_BASE_URL + "sldu_national.zip",
state_list,
lambda x: "61000US" + str(x).zfill(5),
state_idx=(-3, 2),
find_zz=True
)
upper_house_df['VOTER_FILE_ID'] = upper_house_df.apply(
parse_voter_file_id,
axis=1
)
output_df = pd.concat([output_df, upper_house_df])
# School Districts: high school pattern is: 96000US0400450,
# elementary school district pattern is: 95000US0400005
if types == 'ALL' or "CITY" in types:
city_base_url = GAZ_YEAR_URL + str(YEAR)
city_df_list = []
"""
Instead of building iteration in to the city function, iterate through,
supplying each base URL, and give each one a state list with only the state
pulled in the URL
"""
for state in state_list:
city_url = '{}_gaz_place_{}.txt'.format(city_base_url, STATE_CODES[state])
state_city_df = get_census_data(
'City',
city_url,
[state],
lambda x: "16000US" + str(x).zfill(7),
state_idx=(-5, 2)
)
city_df_list.append(state_city_df)
city_df = pd.concat(city_df_list)
output_df = pd.concat([output_df, city_df])
if types == 'ALL' or "STATE" in types:
state_df = get_state(state_list, STATE_CODES)
state_df['NAME'] = state_df['USPS'].apply(lambda x: STATE_ABBREVS[x])
output_df = | pd.concat([output_df, state_df]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# 1 Import libraries and Set path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import scipy.stats as scs
from scipy.stats.mstats import winsorize
from scipy.stats.mstats import gmean
from tabulate import tabulate
# 2 Set path of my sub-directory
from pathlib import Path
# key in your own file path below
myfolder = Path('key in your own file path here')
# 3 Set up files to write output and charts
from matplotlib.backends.backend_pdf import PdfPages
outfile = open('output.txt', 'w')
chartfile = PdfPages('chart-retreg.pdf')
# Stock returns data
# 4 Read Compustat monthly stock returns data
df1 = pd.read_csv(my-folder / 'stock-returns.csv', parse_dates = ['datadate'])
df1 = df1.sort_values(by=['gvkey','datadate'])
df1 = df1.dropna()
# 5 Create portfolio formation year (pfy) variable, where
# pfy = current year for Jul-Dec dates and previous year for Jan-Jun dates.
# This is to facilitate compounding returns over Jul-Jun by pfy later below.
df1['year'], df1['month'] = df1['datadate'].dt.year, df1['datadate'].dt.month
df1['pfy'] = np.where(df1.month > 6, df1.year, df1.year - 1)
# 6 Compute monthly return compounding factor (1+monthly return)
# trt1m is the monthly return, express as percentage, need to convert to % by / 100
df1['mretfactor'] = 1 + df1.trt1m/100
df1 = df1.sort_values(by=['gvkey','pfy'])
df2 = df1[['gvkey', 'conm', 'datadate', 'pfy', 'mretfactor']]
# 7 Compound monthly returns to get annual returns at end-June of each pfy,
# ensuring only firm-years with 12 mths of return data from Jul-Jun are selected.
df2['yret'] = df2.groupby(['gvkey', 'pfy'])['mretfactor'].cumprod() - 1
df3 = df2.groupby(['gvkey', 'pfy']).nth(11)
df3['yret'] = winsorize(df3['yret'], limits=[0.025,0.025])
df3 = df3.drop(['mretfactor'], axis=1) # "axis=1" means to drop column
# Accounting data
# 8 Read Compustat accounting data
df4 = pd.read_csv(myfolder / 'accounting-data2.csv', parse_dates = ['datadate'])
df4 = df4.sort_values(by=['gvkey','datadate'])
# 9 Create portfolio formation year (pfy) variable, portfolio formation in April where
# pfy = current year for Jan-Mar year-end dates and next year for Apr-Dec year-end dates.
# This is to facilitate compounding returns over July-June by pfy below.
# dt.year is pandas method to extract year from 'datadate' variable
# dt.month is pandas method to extract month from 'datadate' variable
df4['year'], df4['month'] = df4['datadate'].dt.year, df4['datadate'].dt.month
df4['pfy'] = np.where(df4.month < 4, df4.year, df4.year + 1)
# 10 Compute accounting variables from Compustat data, keep relevant variables, delete missing values
# Profitability
df4['ROA'] = df4['ni'] / df4['at']
df4['ROA_prev'] = df4.groupby('gvkey')['ROA'].shift(1)
# Leverage
df4['Leverage_ratio'] = df4['dltt'] / df4['seq']
df4['Leverage_ratio_prev'] = df4.groupby('gvkey')['Leverage_ratio'].shift(1)
df4['Current_ratio'] = df4['act'] / df4['lct']
df4['Current_ratio_prev'] = df4.groupby('gvkey')['Current_ratio'].shift(1)
df4['csho_prev'] = df4.groupby('gvkey')['csho'].shift(1)
df4['Shares_issued'] = df4['csho'] - df4['csho_prev']
# Operating
df4['GP_margin'] = df4['gp'] / df4['revt']
df4['GP_margin_prev'] = df4.groupby('gvkey')['GP_margin'].shift(1)
df4['at_prev'] = df4.groupby('gvkey')['at'].shift(1)
df4['at_average']= (df4['at'] + df4['at_prev'])/2
df4['Asset_TO'] = df4['revt'] / df4['at_average']
df4['Asset_TO_prev'] = df4.groupby('gvkey')['Asset_TO'].shift(1)
df4['GP_profitability'] = df4['gp']/df4['at']
df4 = df4[['ib', 'gvkey', 'pfy', 'ni', 'oancf', 'mkvalt', 'gsector', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability' ]]
df4 = df4[np.isfinite(df4)]
df4 = df4.dropna()
# 11 EDA before winsorize
dfeda = df4[['ROA', 'ROA_prev', 'oancf', 'ib', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'mkvalt', 'ni']]
dfeda['PE'] = dfeda['mkvalt'] / dfeda['ni']
dfeda['CROA'] = dfeda['ROA'] - dfeda['ROA_prev']
dfeda['Cquality'] = np.where(dfeda['oancf']> dfeda['ib'], 1, 0)
dfeda2 = dfeda[['ROA', 'oancf', 'CROA', 'Cquality', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'PE']]
print('EDA before winsorize \n\n', dfeda2.describe(), '\n'*5, file=outfile)
# 12 Winsorize variables at 2.5% of left and right tails
for var in ['ib', 'ni', 'oancf', 'mkvalt', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability']:
df4[var] = winsorize(df4[var], limits=[0.025,0.025])
# 13 EDA after winsorize
dfeda3 = df4[['ROA', 'ROA_prev', 'oancf', 'ib', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'mkvalt', 'ni']]
dfeda3['PE'] = dfeda3['mkvalt'] / dfeda3['ni']
dfeda3['CROA'] = dfeda3['ROA'] - dfeda3['ROA_prev']
dfeda3['Cquality'] = np.where(dfeda3['oancf']> dfeda3['ib'], 1, 0)
dfeda4 = dfeda3[['ROA', 'oancf', 'CROA', 'Cquality', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'PE']]
print('EDA after winsorize \n\n', dfeda4.describe(), '\n'*5, file=outfile)
# Merge Stock returns data with Accounting data
# 14 Merge accounting dataset (df4) with returns dataset (df3)
# "inner" means to merge only observations that have data in BOTH datasets
df5 = pd.merge(df3, df4, how='inner', on=['gvkey', 'pfy'])
df5 = df5[['ib', 'gvkey', 'conm', 'pfy', 'yret', 'ni', 'mkvalt', 'oancf', 'gsector', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability']]
# Compute F-score
# 15 Compute 9 F-score ratios
# Profitability
df5['F_income'] = np.where(df5['ROA']> 0, 1, 0)
df5['F_opcash'] = np.where(df5['oancf']> 0, 1, 0)
df5['F_ROA'] = np.where(df5['ROA']>df5['ROA_prev'], 1, 0)
df5['F_quality'] = np.where(df5['oancf']> df5['ib'], 1, 0)
# Leverage
df5['F_leverage'] = np.where(df5['Leverage_ratio']< df5['Leverage_ratio_prev'], 1, 0)
df5['F_currentratio'] = np.where(df5['Current_ratio']> df5['Current_ratio_prev'], 1, 0)
df5['F_dilute'] = np.where(df5['Shares_issued']< 0 , 1, 0)
# Operating
df5['F_GPM'] = np.where(df5['GP_margin']< df5['GP_margin_prev'], 1, 0)
df5['F_ATO'] = np.where(df5['Asset_TO']< df5['Asset_TO_prev'], 1, 0)
# 16 Group F-score based on categories
df5['F-profitability'] = df5['F_income'] + df5['F_opcash'] + df5['F_ROA'] + df5['F_quality']
df5['F_leverage_liquidity'] = df5['F_leverage'] + df5['F_currentratio'] + df5['F_dilute']
df5['F_operating'] = df5['F_GPM'] + df5['F_ATO']
df5['F_score'] = df5['F-profitability'] + df5['F_leverage_liquidity'] + df5['F_operating']
# Long Portfolio
# 17 Filter out F_score more than 7
df6 = df5[df5.F_score > 7]
# 18 Average PE per pfy per gsector
df6['PE'] = df6['mkvalt'] / df6['ni']
df7 = df6.groupby(['pfy','gsector'], as_index=False)['PE'].mean()
# 19 Filter for stocks with PE lower than gsector average
df8 = df6.merge(df7, on = ['pfy','gsector'], how='left')
df8['y_x'] = df8['PE_y'] - df8['PE_x']
df11 = df8[df8['y_x'] > 0]
# 20 Finding the number of unique company/gvkey in our long portfolio
df12 = df11['gvkey'].unique()
# 21 Mean yret of each pfy
df23 = pd.DataFrame(df11.groupby(['pfy'], as_index=False)['yret'].mean())
df23.rename(columns={'yret':'pyret'}, inplace = True)
# 22 add pfy count number
df24 = df11.groupby(['pfy'], as_index=False)['yret'].count()
df25 = pd.merge(df23, df24, how='inner', on=['pfy'])
df25.rename(columns={'yret':'count'}, inplace = True)
# 23 Compute yearly return compounding factor (1+yearly return)
df25['ppyret'] = df25['pyret'] + 1
# Risk free rate
# 24 Calculate risk free rate using UStreasury 1month
import quandl
from datetime import datetime
# Key in your quandl api key below
QUANDL_API_KEY = 'key in your quandl api key here'
quandl.ApiConfig.api_key = QUANDL_API_KEY
start = datetime(2002, 1, 1)
end = datetime(2020, 12, 31)
rf = quandl.get('USTREASURY/YIELD.1',start_date=start, end_date=end)
risk_free = rf['1 MO']
rfr = risk_free.mean()/100
# 25 Annualise the total return, based on average and total
Lportfolio_annualised_return_rut = scs.gmean(df25.loc[:,"ppyret"])-1
# 26 Calculate annualized volatility from the standard deviation
Lportfolio_vola_rut = np.std(df25['pyret'], ddof=1)
# 27 Calculate the Sharpe ratio
Lportfolio_sharpe_rut = ((Lportfolio_annualised_return_rut - rfr)/ Lportfolio_vola_rut)
# 28 Define negative returns and compute standard deviation
Lportfolio_negative_ret_rut = df25.loc[df25['pyret'] < 0]
Lportfolio_expected_ret_rut = np.mean(df25['pyret'])
Lportfolio_downside_std_rut = Lportfolio_negative_ret_rut['pyret'].std()
# 29 Compute Sortino Ratio
Lportfolio_sortino_rut = (Lportfolio_expected_ret_rut - rfr)/Lportfolio_downside_std_rut
# 30 Compute Worst and Best pfy return
Lpcolumn = df25["pyret"]
Lpmax_value = Lpcolumn.max()
Lpmin_value = Lpcolumn.min()
# 31 Compute % of profitable pfy
Lpprofitable_pfy = len(df25[df25['pyret']>0]['pyret'])/len(df25['pyret'])
# 32 Compute long portfolio monthly price
#Merge long portofio df11 with stock return to get monthly close price
col = ['pfy','gvkey']
df21 = df11[col]
df26 = pd.merge(df1, df21, how='inner', on=['gvkey', 'pfy'])
# Calculate long portfolio monthly price
df27 = df26.groupby(['pfy','month'], as_index=False)['prccm'].mean()
# 33 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
lphwm = np.zeros(len(df27))
lpdrawdown = np.zeros(len(df27))
lpduration = 0
# 34 Determine maximum drawdown (maxDD)
for t in range(len(df27)):
lphwm[t] = max(lphwm[t-1], df27['prccm'][t])
lpdrawdown[t] = ((lphwm[t] - df27.prccm[t]) / lphwm[t]) * 100
lpmaxDD = lpdrawdown.max()
# 35 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(df27)):
if np.allclose(lpdrawdown[j], lpmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df27.prccm[k], lphwm[j], atol=1e-8):
lpduration = j - k
else:
continue
else:
continue
# Short portfolio
# 36 Filter out F_score less than 2
df28 = df5[df5.F_score < 2]
# 37 Average PE per pfy per gsector
df28['PE'] = df28['mkvalt'] / df28['ni']
df29 = df28.groupby(['pfy','gsector'], as_index=False)['PE'].mean()
# 38 Filter for stocks with PE lower than gsector average
df30 = df28.merge(df29, on = ['pfy','gsector'], how='left')
df30['y_x'] = df30['PE_y'] - df30['PE_x']
df33 = df30[df30['y_x'] > 0]
# 39 Finding the number of unique company/gvkey in our short portfolio
df34 = df33['gvkey'].unique()
# 40 Mean yret of each pfy
df37 = pd.DataFrame(df33.groupby(['pfy'], as_index=False)['yret'].mean())
df37.rename(columns={'yret':'pyret'}, inplace = True)
# 41 add pfy count number
df38 = df33.groupby(['pfy'], as_index=False)['yret'].count()
df39 = | pd.merge(df37, df38, how='inner', on=['pfy']) | pandas.merge |
import csv
import gzip
import numpy as np
import pandas as pd
def infer_delim(file):
"""
Sniff the delimiter in the given file
Parameters
----------
file : str
File name
Return
------
the delimiter used in the dataframe (typically either tab or commas)
"""
try:
with open(file, "r") as csvfile:
line = csvfile.readline()
except UnicodeDecodeError:
with gzip.open(file, "r") as gzipfile:
line = gzipfile.readline().decode()
dialect = csv.Sniffer().sniff(line)
return dialect.delimiter
def load_profiles(profiles):
"""
Unless a dataframe is provided, load the given profile dataframe from path or string
Parameters
----------
profiles : {str, pandas.DataFrame}
file location or actual pandas dataframe of profiles
Return
------
pandas DataFrame of profiles
"""
if not isinstance(profiles, pd.DataFrame):
try:
delim = infer_delim(profiles)
profiles = pd.read_csv(profiles, sep=delim)
except FileNotFoundError:
raise FileNotFoundError(f"{profiles} profile file not found")
return profiles
def load_platemap(platemap, add_metadata_id=True):
"""
Unless a dataframe is provided, load the given platemap dataframe from path or string
Parameters
----------
platemap : pandas dataframe
location or actual pandas dataframe of platemap file
add_metadata_id : bool
boolean if "Metadata_" should be appended to all platemap columns
Return
------
platemap : pandas.core.frame.DataFrame
pandas DataFrame of profiles
"""
if not isinstance(platemap, pd.DataFrame):
try:
delim = infer_delim(platemap)
platemap = pd.read_csv(platemap, sep=delim)
except FileNotFoundError:
raise FileNotFoundError(f"{platemap} platemap file not found")
if add_metadata_id:
platemap.columns = [
f"Metadata_{x}" if not x.startswith("Metadata_") else x
for x in platemap.columns
]
return platemap
def load_npz(npz_file, fallback_feature_prefix="DP"):
"""
Load an npz file storing features and, sometimes, metadata.
The function will first search the .npz file for a metadata column called
"Metadata_Model". If the field exists, the function uses this entry as the
feature prefix. If it doesn't exist, use the fallback_feature_prefix.
If the npz file does not exist, this function returns an empty dataframe.
Parameters
----------
npz_file : str
file path to the compressed output (typically DeepProfiler output)
fallback_feature_prefix :str
a string to prefix all features [default: "DP"].
Return
------
df : pandas.core.frame.DataFrame
pandas DataFrame of profiles
"""
try:
npz = np.load(npz_file, allow_pickle=True)
except FileNotFoundError:
return | pd.DataFrame([]) | pandas.DataFrame |
# read_log_file.py
#
# Given a log file, parse through that file and return a gc_events_dataframe containing all
# relevant information from the log during runtime
from src.parse_log_file import get_parsing_groups
import pandas as pd
import numpy as np
import re
import glob
import matplotlib
# get_file_names_wildcard
#
# Given a path including a linux style wildcard search, return the list of all matching files
# on that path. Each file is a string.
#
def get_file_names_wildcard(path):
files = []
filelist = glob.glob(path)
if not filelist:
print("Warning: No files collected using following path: " + str(path))
return []
else:
for file in filelist:
files.append(file)
return files
# get_gc_event_tables
#
# Take a list of list of log file paths/names, and construct a list of tables, one for
# each log in the list. Creates correct TimeFromStart_seconds time column for data, scaling
# based on unit present in log file
#
def get_gc_event_tables(files, zero_times=True, ignore_crashes = False):
# Files must be a list of strings
# Time range in seconds is either a list with 2 values,
# or a single integer max time.
if ignore_crashes:
print("Warning: ignore_crashes takes log files and ignores all crashes.")
if not files:
print("Warning: Files list empty in get_parsed_comparions_from_files")
return []
# all_runs
all_runs = []
for filelist in files:
gc_event_dataframes = [] # associated with one GC run.
for file in filelist:
# Create each log gc_event_dataframe
gc_event_dataframe = get_parsed_data_from_file(file, ignore_crashes)
gc_event_dataframe = scale_time(gc_event_dataframe)
gc_event_dataframe = scale_heap_allocation(gc_event_dataframe)
if not gc_event_dataframe.empty:
gc_event_dataframes.append(gc_event_dataframe)
else:
print("No information collected for file: ", file)
if gc_event_dataframes:
df = | pd.concat(gc_event_dataframes) | pandas.concat |
# %%
"""Image Regression Baseline"""
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import os
import sys
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from PIL import Image
import tensorflow as tf
from tensorflow.keras import callbacks, layers, losses, models
# %%
print(tf.__version__)
# %% [markdown]
# ## Data
# %%
# select distinct
df2 = | pd.read_csv('./data/all_sb.csv') | pandas.read_csv |
#!/usr/bin/python3
import sys
import os
from tqdm import tqdm
from binascii import b2a_hex
import pandas as pd
import pickle
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTTextBox, LTTextLine, LTFigure, LTImage, LTChar, LTPage
from logging import getLogger, StreamHandler, Formatter, DEBUG, INFO, WARN
formatter = Formatter('%(asctime)s %(name)s[%(levelname)s] %(message)s', "%Y-%m-%d %H:%M:%S")
logger = getLogger(__name__)
logger.setLevel(INFO)
handler = StreamHandler()
handler.setLevel(logger.getEffectiveLevel())
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
def with_pdf(pdf_doc, fn, pdf_pwd, *args):
"""Open the pdf document, and apply the function, returning the results"""
result = None
try:
# open the pdf file
fp = open(pdf_doc, "rb")
# create a parser object associated with the file object
parser = PDFParser(fp)
# create a PDFDocument object that stores the document structure
doc = PDFDocument(parser, pdf_pwd)
# connect the parser and document objects
parser.set_document(doc)
if doc.is_extractable:
# apply the function and return the result
result = fn(doc, *args)
# close the pdf file
fp.close()
except IOError:
# the file doesn't exist or similar problem
pass
return result
# Table of Contents
def _parse_toc(doc):
"""With an open PDFDocument object, get the table of contents (toc) data
[this is a higher-order function to be passed to with_pdf()]"""
toc = []
try:
outlines = doc.get_outlines()
for (level, title, dest, a, se) in outlines:
toc.append((level, title))
except PDFNoOutlines:
pass
return toc
def get_toc(pdf_doc, pdf_pwd=""):
"""Return the table of contents (toc), if any, for this pdf file"""
return with_pdf(pdf_doc, _parse_toc, pdf_pwd)
# Extracting Images
def write_file(folder, filename, filedata, flags="w"):
"""Write the file data to the folder and filename combination
(flags: 'w' for write text, 'wb' for write binary, use 'a' instead of 'w' for append)"""
if os.path.isdir(folder):
file_obj = open(os.path.join(folder, filename), flags)
file_obj.write(filedata)
file_obj.close()
def determine_image_type(stream_first_4_bytes):
"""Find out the image file type based on the magic number comparison of the first 4 (or 2) bytes"""
file_type = None
bytes_as_hex = str(b2a_hex(stream_first_4_bytes))
if bytes_as_hex.startswith("ffd8"):
file_type = ".jpeg"
elif bytes_as_hex == "89504e47":
file_type = ".png"
elif bytes_as_hex == "47494638":
file_type = ".gif"
elif bytes_as_hex.startswith("424d"):
file_type = ".bmp"
return file_type
def save_image(lt_image, page_number, images_folder):
"""Try to save the image data from this LTImage object, and return the file name, if successful"""
if not lt_image.stream: raise RuntimeError
file_stream = lt_image.stream.get_rawdata()
if not file_stream: raise RuntimeError
file_ext = determine_image_type(file_stream[0:4])
if not file_ext: raise RuntimeError
file_name = "".join([str(page_number), "_", lt_image.name, file_ext])
write_file(images_folder, file_name, file_stream, flags="wb")
return file_name
# Extracting Text
def to_bytestring(s, enc="utf-8"):
"""Convert the given unicode string to a bytestring, using the standard encoding,
unless it's already a bytestring"""
if s:
if isinstance(s, str):
return s
else:
return s.encode(enc)
def update_page_text(df, lt_obj, pct=0.2, logger=logger):
"""
Use the bbox x0,x1 values within pct% to produce lists of associated text within the hash
df:
cols = [x0, y0, x1, y1, class, objs, str]
"""
if df is None: df = | pd.DataFrame(columns=['x0', 'y0', 'x1', 'y1', 'class', 'objs', 'str']) | pandas.DataFrame |
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
def normaliza(self, X):
correction = np.sqrt((len(X) - 1) / len(X)) # std factor corretion
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
def gof(self):
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
def srmr(self):
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = pd.DataFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(len(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return pd.DataFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = pd.DataFrame(0, index=range(1, 6), columns=manifests)
for i in range(len(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].value_counts()
frequencia = frequencia / len(data) * 100
frequencia = frequencia.reindex_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillna(0).T
frequencia = frequencia[(frequencia.T != 0).any()]
maximo = pd.DataFrame.max(pd.DataFrame.max(data, axis=0))
if int(maximo) & 1:
neg = np.sum(frequencia.ix[:, 1: ((maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((maximo + 1) / 2)]
pos = np.sum(
frequencia.ix[:, (((maximo + 1) / 2) + 1):maximo], axis=1)
else:
neg = np.sum(frequencia.ix[:, 1:((maximo) / 2)], axis=1)
ind = 0
pos = np.sum(frequencia.ix[:, (((maximo) / 2) + 1):maximo], axis=1)
frequencia['Neg.'] = pd.Series(
neg, index=frequencia.index)
frequencia['Ind.'] = pd.Series(
ind, index=frequencia.index)
frequencia['Pos.'] = pd.Series(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMmax = pd.DataFrame.max(SEM)
ok = None
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = pd.concat([block, SEM], axis=1)
for j in range(SEMmax + 1):
dataSEM = (block.loc[data_[segmento] == j]
).drop(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.rename(j + 1)
ok = dataSEM if ok is None else pd.concat(
[ok, dataSEM], axis=1)
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].dropna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.std(self.data, 0)
mean_ = np.mean(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(len(self.data.columns))]
return [mean_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lenlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(len(Beta))]
beta_ = np.diag(beta_)
beta = pd.DataFrame(beta, index=self.latent, columns=self.latent)
mid = pd.DataFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(len(exoVar)):
for i in range(len(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.copy()
beta_ = pd.DataFrame(1, index=np.arange(
len(exoVar)), columns=np.arange(len(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(len(self.path_matrix)))
beta = pd.DataFrame(beta)
partial_ = pd.DataFrame.dot(self.outer_weights, beta.T.values)
prediction = pd.DataFrame.dot(partial_, self.outer_loadings.T.values)
predicted = pd.DataFrame.dot(self.data, prediction)
predicted.columns = self.manifests
mean_ = np.mean(self.data, 0)
intercept = mean_ - np.dot(mean_, prediction)
predictedData = predicted.apply(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
composite = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.sum(abs(loadings))**2
denominador = numerador + (p - np.sum(loadings ** 2))
cr = numerador / denominador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = len(self.data_)
r2 = self.r2.values
r2adjusted = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
p = sum(self.LVariables['target'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
mean = []
allBlocks = []
for i in range(self.lenlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
allBlocks.append(list(block_.values))
block = htmt_.ix[block_, block_]
mean_ = (block - np.diag(np.diag(block))).values
mean_[mean_ == 0] = np.nan
mean.append(np.nanmean(mean_))
comb = [[k, j] for k in range(self.lenlatent)
for j in range(self.lenlatent)]
comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
for i in range(self.lenlatent ** 2)]
comb__ = []
for i in range(self.lenlatent ** 2):
block = (htmt_.ix[allBlocks[comb[i][1]],
allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.append(np.nanmean(block))
htmt__ = np.divide(comb__, comb_)
where_are_NaNs = np.isnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = pd.DataFrame(np.tril(htmt__.reshape(
(self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
# Comunalidades
return self.outer_loadings**2
def AVE(self):
# AVE
return self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
def fornell(self):
cor_ = pd.DataFrame.corr(self.fscores)**2
AVE = self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
for i in range(len(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = pd.DataFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denominador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denominador)
if(np.isnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = pd.DataFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(pd.DataFrame.corr(self.fscores))
return pd.DataFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
p_ = len(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.sum(block, axis=1))
cor_ = pd.DataFrame.corr(block)
denominador = soma * correction**2
numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denominador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
vif = []
totalmanifests = range(len(self.data_.columns))
for i in range(len(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.append(1 / (1 - r2))
vif = pd.DataFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lenlatent):
for j in range(self.lenlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lenlatent, self.lenlatent])
for i in range(self.lenlatent):
for j in range(self.lenlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = | pd.DataFrame(corTrue, corFalse.columns, corFalse.index) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import groupby
from operator import itemgetter
import pandas as pd
from covsirphy.util.error import deprecate, UnExecutedError
from covsirphy.util.term import Term
from covsirphy.cleaning.jhu_data import JHUData
from covsirphy.cleaning.population import PopulationData
from covsirphy.cleaning.oxcgrt import OxCGRTData
from covsirphy.visualization.line_plot import line_plot
from covsirphy.ode.mbase import ModelBase
from covsirphy.phase.phase_estimator import MPEstimator
from covsirphy.analysis.scenario import Scenario
class PolicyMeasures(Term):
"""
Analyse the relationship of policy measures and parameters of ODE models.
This analysis will be done at country level because OxCGRT tracks policies at country level.
Args:
jhu_data (covsirphy.JHUData): object of records
population_data (covsirphy.PopulationData): PopulationData object
tau (int or None): tau value [min]
"""
@deprecate("PolicyMeasures", version="2.19.1-zeta-fu1")
def __init__(self, jhu_data, population_data, oxcgrt_data, tau=None):
# Records
self.jhu_data = self._ensure_instance(
jhu_data, JHUData, name="jhu_data")
# Population
self.population_data = self._ensure_instance(
population_data, PopulationData, name="population_data")
# OxCGRT
self.oxcgrt_data = self._ensure_instance(
oxcgrt_data, OxCGRTData, name="oxcgrt_data")
# tau value must be shared
self.tau = self._ensure_tau(tau)
# Init
self._countries = self._all_countries()
self._init_scenario()
self.model = None
def _all_countries(self):
"""
Return names of countries where records are registered.
Returns:
(list[str]): list of country names
"""
j_list = self.jhu_data.countries(complement=True)
p_list = self.population_data.countries()
o_list = self.oxcgrt_data.countries()
return list(set(j_list) & set(p_list) & set(o_list))
def _init_scenario(self):
"""
Initialize the scenario classes of registered countries.
"""
self.scenario_dict = {
country: Scenario(
self.jhu_data, self.population_data, country=country, tau=self.tau)
for country in self._countries
}
def scenario(self, country):
"""
Return Scenario instance of the country.
Args:
country (str): country name
Raises:
KeyError: the country is not registered
Returns:
covsirphy.Scenario: Scenario instance
"""
if country not in self.scenario_dict.keys():
raise KeyError(f"{country} is not registered.")
return self.scenario_dict[country]
@property
def countries(self):
"""
list[str]: countries to analyse
"""
return self._countries
@countries.setter
def countries(self, country_list):
selected_set = set(country_list)
all_set = set(self._all_countries())
if not selected_set.issubset(all_set):
un_selectable_set = selected_set - all_set
un_selectable = ", ".join(list(un_selectable_set))
raise KeyError(
f"{un_selectable} cannot be selected because records are not registered.")
self._countries = country_list
def trend(self, min_len=2):
"""
Perform S-R trend analysis for all registered countries.
Args:
min_len (int): minimum length of phases to have
Returns:
covsirphy.PolicyMeasures: self
Note:
Countries which do not have @min_len phases will be un-registered.
"""
min_len = self._ensure_natural_int(min_len, name="min_len")
for country in self._countries:
try:
self.scenario_dict[country].trend(set_phases=True, show_figure=False)
except ValueError:
pass
countries = [
country for country in self._countries
if len(self.scenario_dict[country][self.MAIN]) >= min_len
]
self.countries = countries
return self
def summary(self, columns=None, countries=None):
"""
Summarize of scenarios.
Args:
columns (list[str] or None): columns to show
countries (list[str] or None): countries to show
Returns:
pandas.DataFrame
Note:
If @columns is None, all columns will be shown.
"""
countries = countries or self._countries
if not isinstance(countries, (list, set)):
raise TypeError("@countries must be a list or set.")
dataframes = []
for country in countries:
df = self.scenario_dict[country].summary(columns=columns)
df[self.PHASE] = df.index
df[self.COUNTRY] = country
dataframes.append(df)
summary_df = | pd.concat(dataframes, axis=0, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""Utilities."""
# pylint: disable=invalid-name # spyder cannot read good-names from .pylintrc
# pylint: disable=E1101 # "torch has nor 'DoubleTensor' menber"
from copy import copy, deepcopy
import pydot
import sys
import numpy as np
from numpy.random import multivariate_normal, seed
from numpy import (
allclose, array, concatenate, count_nonzero, diag, eye, empty,
fill_diagonal, hstack, isnan, kron, median, nan, ones, reshape, std, tile,
var, vstack, zeros)
import numdifftools as nd
from numpy.linalg import cholesky, inv, norm
from pandas import DataFrame
from scipy.optimize import minimize
from sympy import diff, Heaviside, lambdify
import torch
import svg
# set numpy random seed
seed(1002)
def adjacency(model_dat):
"""numeric function for model and direct effects, identification matrics"""
define_equations = model_dat["define_equations"]
xvars = model_dat["xvars"]
yvars = model_dat["yvars"]
ndim = len(yvars)
mdim = len(xvars)
def equations_alg(xvars, bias=0, bias_ind=0):
"""algebraic equations plus bias containing xvars by substituting yvars"""
equationsx = list(define_equations(*xvars))
equationsx[bias_ind] = bias + equationsx[bias_ind]
for bias_ind in range(ndim):
for j in range(bias_ind + 1, ndim):
if hasattr(equationsx[j], 'subs'):
equationsx[j] = equationsx[j].subs(yvars[bias_ind], equationsx[bias_ind])
return equationsx
# algebraic equations containing xvars and yvars
equations = define_equations(*xvars)
# ToDo modules do not work, therefore replace_heaviside required # yyy
#modules = [{'Heaviside': lambda x: np.heaviside(x, 0)}, 'sympy', 'numpy']
#modules = [{'Heaviside': lambda x: 1 if x > 0 else 0}, 'sympy', 'numpy']
modules = ['sympy', 'numpy']
def model(xvals, bias=0, bias_ind=0):
"""numeric model plus bias in terms of xvars"""
# float for conversion of numpy array from scipy minimize
equationsx = equations_alg(xvars, float(bias), bias_ind)
model_lam = lambdify(xvars, equationsx, modules=modules)
xvals = array(xvals).reshape(mdim, -1)
try:
yhat = array([model_lam(*xval) for xval in xvals.T]).T
except Exception as e:
# find warnings
print(e, "\nFinding erroneous element yhat_it ...")
for t, xval in enumerate(xvals.T):
for i, eq in enumerate(equationsx):
yhat_it = eq.subs(dict(zip(xvars, xval)))
print(DataFrame(xval, xvars, [t]))
print("i = {}, t = {}, yhat_it = {} {}"
.format(i, t, yhat_it, type(yhat_it)))
print(yvars[i], "=", eq)
raise ValueError(e)
return yhat.astype(np.float64)
# algebraic direct effects containing xvars and yvars
mx_alg = array([[diff(eq, xvar) for xvar in xvars] for eq in equations])
my_alg = array([[diff(eq, yvar) for yvar in yvars] for eq in equations])
# algebraic direct effects as lamba function of xvars, yvars
# and then only as function of xvars
mx_lamxy = lambdify((xvars, yvars), mx_alg, modules=modules)
my_lamxy = lambdify((xvars, yvars), my_alg, modules=modules)
def mx_lam(xvars):
return mx_lamxy(xvars, equations_alg(xvars))
def my_lam(xvars):
return my_lamxy(xvars, equations_alg(xvars))
# identification matrics for direct effects
idx = digital(mx_alg)
idy = digital(my_alg)
adjacency_dat = {
"model": model,
"mx_alg": mx_alg,
"my_alg": my_alg,
"mx_lam": mx_lam,
"my_lam": my_lam,
"idx": idx,
"idy": idy,
}
model_dat.update(adjacency_dat)
return model_dat
def simulate(model_dat):
"""simulate exogeneous x and corresponding endogeneous y data for example equations"""
# dimensions
ndim = len(model_dat["yvars"])
mdim = len(model_dat["xvars"])
selvec = zeros(ndim)
selvec[[list(model_dat["yvars"]).index(el) for el in model_dat["ymvars"]]] = 1
selmat = diag(selvec)
selvecc = selvec.reshape(ndim, 1)
fym = eye(ndim)[diag(selmat) == 1]
# compute theoretical RAM covariance matrices # ToDo: needed? # yyy
sigmax_theo = model_dat["sigx_theo"] * (
model_dat["rho"] * ones((mdim, mdim)) + (1-model_dat["rho"]) * eye(mdim))
sigmau_theo = model_dat["sigym_theo"] * (
model_dat["rho"] * selvecc @ selvecc.T + (1-model_dat["rho"]) * selmat)
# symmetrize covariance matrices, such that numerically well conditioned
sigmax_theo = (sigmax_theo + sigmax_theo.T) / 2
sigmau_theo = (sigmau_theo + sigmau_theo.T) / 2
# simulate x data
# use cholesky to avoid numerical random normal posdef problem, old:
# xdat = multivariate_normal(model_dat["xmean_true"], sigmax_theo, model_dat["tau"]).T
xdat = multivariate_normal(zeros(mdim), eye(mdim), model_dat["tau"]).T
xdat = array(model_dat["xmean_true"]).reshape(mdim, 1) + cholesky(sigmax_theo) @ xdat
# ymdat from yhat with enndogenous errors
model = adjacency(model_dat)["model"] # model constructed from adjacency
yhat = model(xdat)
ymdat = fym @ (yhat + multivariate_normal(zeros(ndim), sigmau_theo, model_dat["tau"]).T)
# delete nan columns
colind = ~np.any(isnan(ymdat), axis=0)
if sum(colind) > 0:
xdat = xdat[:, colind]
ymdat = ymdat[:, colind]
# new tau after None columns deleted
tau_new = ymdat.shape[1]
if tau_new < model_dat["tau"]:
raise ValueError("Model observations reduced from {} to {} because some simulations failed."
.format(model_dat["tau"], tau_new))
# test bias estimation
#ymdat[-1, :] += 66
return xdat, ymdat
def replace_heaviside(mxy, xvars, xval):
"""deal with sympy Min and Max giving Heaviside:
Heaviside(x) = 0 if x < 0 and 1 if x > 0, but
Heaviside(0) needs to be defined by user,
we set Heaviside(0) to 0 because in general there is no sensititvity,
the numpy heaviside function is lowercase and wants two arguments:
an x value, and an x2 to decide what should happen for x==0
https://stackoverflow.com/questions/60171926/sympy-name-heaviside-not-defined-within-lambdifygenerated
"""
for i in range(mxy.shape[0]):
for j in range(mxy.shape[1]):
if hasattr(mxy[i, j], 'subs'):
# ToDo: rename, check # yyyy
# just for german_insurance substitute xvars again since
# mxy still has sympy xvars reintroduced via yvars_elim
mxy[i, j] = mxy[i, j].subs(dict(zip(xvars, xval)))
#if mxy[i, j] != mxy[i, j].subs(Heaviside(0), 0):
# print("replaced {} by {} in element {} {}"
# .format(mxy[i, j], mxy[i, j].subs(Heaviside(0), 0), i, j))
mxy[i, j] = mxy[i, j].subs(Heaviside(0), 0)
return mxy.astype(np.float64)
def create_model(model_dat):
"""specify model and compute effects"""
# dimensions
ndim = len(model_dat["yvars"])
mdim = len(model_dat["xvars"])
pdim = len(model_dat["ymvars"])
selvec = zeros(ndim)
selvec[[list(model_dat["yvars"]).index(el) for el in model_dat["ymvars"]]] = 1
selmat = diag(selvec)
tau = model_dat["xdat"].shape[1]
selvec = diag(selmat)
# check
if model_dat["ymdat"].shape[0] != pdim:
raise ValueError("Number of yvars {} and ymdat {} not identical."
.format(model_dat["ymdat"].shape[0], pdim))
# numeric function for model and direct effects, identification matrics
model_dat.update(adjacency(model_dat))
# yhat without enndogenous errors
yhat = model_dat["model"](model_dat["xdat"])
yhat = vstack(yhat).reshape(len(model_dat["yvars"]), -1)
# means and demeaning data for estimation of linear total derivative
xmean = model_dat["xdat"].mean(axis=1)
ydet = model_dat["model"](xmean)
ymean = yhat.mean(axis=1)
ymmean = model_dat["ymdat"].mean(axis=1)
ymedian = median(yhat, axis=1)
xmedian = median(model_dat["xdat"], axis=1)
xcdat = model_dat["xdat"] - xmean.reshape(mdim, 1)
ymcdat = model_dat["ymdat"] - ymmean.reshape(pdim, 1)
# effect identification matrices
edx, edy = compute_ed(model_dat["idx"], model_dat["idy"])
_, _, fdx, fdy = compute_fd(model_dat["idx"], model_dat["idy"],
model_dat["yvars"], model_dat["final_var"])
# more dimensions
qxdim = count_nonzero(model_dat["idx"])
qydim = count_nonzero(model_dat["idy"])
qdim = qxdim + qydim
# model summary
print("Causing starting")
print("\nModel with {} endogenous and {} exogenous variables, "
"{} direct effects and {} observations."
.format(ndim, mdim, qdim, tau))
# individual theoretical effects
(mx_theos, my_theos, ex_theos, ey_theos, exj_theos, eyx_theos, eyj_theos, eyy_theos
) = ([] for i in range(8))
for obs in range(min(tau,
model_dat["show_nr_indiv"])):
xval = model_dat["xdat"][:, obs]
# numeric direct effects since no sympy algebraic derivative
mx_theo = replace_heaviside(array(model_dat["mx_lam"](xval)), model_dat["xvars"], xval) # yyy
my_theo = replace_heaviside(array(model_dat["my_lam"](xval)), model_dat["xvars"], xval) # yyy
# total and final effects
ex_theo, ey_theo = total_effects_alg(mx_theo, my_theo, edx, edy)
exj_theo, eyj_theo, eyx_theo, eyy_theo = compute_mediation_effects(
mx_theo, my_theo, ex_theo, ey_theo, model_dat["yvars"], model_dat["final_var"])
# append
mx_theos.append(mx_theo)
my_theos.append(my_theo)
ex_theos.append(ex_theo)
ey_theos.append(ey_theo)
exj_theos.append(exj_theo)
eyx_theos.append(eyx_theo)
eyj_theos.append(eyj_theo)
eyy_theos.append(eyy_theo)
# theoretical total effects at xmean and corresponding consistent ydet,
# using closed form algebraic formula from sympy direct effects
# instead of automatic differentiation of model
mx_theo = replace_heaviside(array(model_dat["mx_lam"](xmean)), model_dat["xvars"], xmean) # yyy
my_theo = replace_heaviside(array(model_dat["my_lam"](xmean)), model_dat["xvars"], xmean) # yyy
ex_theo, ey_theo = total_effects_alg(mx_theo, my_theo, edx, edy)
exj_theo, eyj_theo, eyx_theo, eyy_theo = compute_mediation_effects(
mx_theo, my_theo, ex_theo, ey_theo, model_dat["yvars"], model_dat["final_var"])
direct_theo = directvec_alg(mx_theo, my_theo, model_dat["idx"], model_dat["idy"])
# selwei whitening matrix of manifest demeaned variables
selwei = diag(1 / var(ymcdat, axis=1))
# ToDo: some entries are just passed directly, use update for others
setup_dat = {
"direct_theo": direct_theo,
"selmat": selmat,
"selvec": selvec,
"selwei": selwei,
"ymean": ymean,
"xmedian": xmedian,
"ymedian": ymedian,
"ydet": ydet,
"tau": tau,
"xcdat": xcdat,
"ymcdat": ymcdat,
"yhat": yhat,
"xmean": xmean,
"mx_theo": mx_theo,
"my_theo": my_theo,
"ex_theo": ex_theo,
"ey_theo": ey_theo,
"exj_theo": exj_theo,
"eyx_theo": eyx_theo,
"eyj_theo": eyj_theo,
"eyy_theo": eyy_theo,
"mx_theos": mx_theos,
"my_theos": my_theos,
"ex_theos": ex_theos,
"ey_theos": ey_theos,
"exj_theos": exj_theos,
"eyx_theos": eyx_theos,
"eyj_theos": eyj_theos,
"eyy_theos": eyy_theos,
}
model_dat.update(setup_dat)
model_dat = update_model(model_dat)
return model_dat
def nonzero(el):
"""identifies nonzero element"""
if el == 0:
nonz = 0
if el != 0:
nonz = 1
return nonz
def roundec(num, dec=None):
"""rounds number or string to dec decimals,
converts to string and strips trailing zeros and dot from the right"""
# dec
if not dec:
limit_dec = 1000 # ToDo: set limit_dec globally # yyy
if abs(num) < limit_dec:
dec = 2
else:
dec = 0
string = ("{0:." + str(dec) + "f}").format(float(num)).rstrip("0").rstrip(".")
return string
def submatrix(mat, j):
"""computes submatrix or -vector by replacing j-th row and column by zeros"""
ndim = mat.shape[0]
mdim = mat.shape[1]
sub = deepcopy(mat)
mzeros = zeros(mdim)
nzeros = zeros(ndim)
if ndim > 1:
sub[j, :] = mzeros
if mdim > 1:
sub[:, j] = nzeros
return sub
def compute_ed(idx, idy):
"""compute total effects identification matrices
from direct identification matrices or direct effects"""
edx, edy = total_effects_alg(idx, idy, None, None)
edx = digital(edx)
edy = digital(edy)
return edx, edy
def compute_fd(idx, idy, yvars, final_var):
"""compute mediation effects identification matrices
from direct identification matrices or direct effectss"""
edx, edy = compute_ed(idx, idy)
exj, eyj, eyx, eyy = compute_mediation_effects(idx, idy, edx, edy, yvars, final_var)
fdxj = digital(exj)
fdyj = digital(eyj)
fdx = digital(eyx)
fdy = digital(eyy)
return fdxj, fdyj, fdx, fdy
def total_effects_alg(mx, my, edx, edy):
"""compute algebraic total effects given direct effects and identification matrices"""
# dimensions
ndim = mx.shape[0]
# error if my is not normalized
if sum(abs(diag(my))) > 0:
raise ValueError("No Normalization. Diagonal elements of 'my' differ from zero.")
# total effects
ey = inv(eye(ndim) - my)
ex = ey @ mx
# set fixed null and unity effects numerically exactly to 0 and 1
if edx is not None:
ex[edx == 0] = 0
if edy is not None:
ey[edy == 0] = 0
fill_diagonal(ey, 1)
return ex, ey
def sse_orig(mx, my, fym, ychat, ymcdat, selwei, model_dat):
"""weighted MSE target function plus Tikhonov regularization term"""
# weighted mean squared error
ymchat = fym @ ychat
err = ymchat - ymcdat
#sse = torch.trace(err.T @ selwei @ err) # big matrix needs too much RAM
# elementwise multiplication and broadcasting and summation:
sse = sum(torch.sum(err * err * torch.diag(selwei).view(-1, 1), dim=0))
# sse with tikhonov term
direct = directvec(mx, my, model_dat["idx"], model_dat["idy"])
ssetikh = sse + model_dat["alpha"] * direct.T @ direct
return ssetikh.requires_grad_(True)
class StructuralNN(torch.nn.Module):
"""AD identified structural linear nn,
linear ychat approximation using ex effects reduced form"""
def __init__(self, model_dat):
super(StructuralNN, self).__init__()
self.eye = torch.DoubleTensor(eye(model_dat["ndim"]))
self.idx = torch.DoubleTensor(model_dat["idx"])
self.idy = torch.DoubleTensor(model_dat["idy"])
self.xcdat = torch.DoubleTensor(model_dat["xcdat"])
def forward(self, mx, my):
# impose identification restrictions already on imput
# ToDo: use torch.nn.utils.prune custom_from_mask or own custom method
mx = mx * self.idx
my = my * self.idy
ey = (self.eye - my).inverse()
ex = ey @ mx
dy = ex @ self.xcdat # reduced form
ychat = dy
return ychat
def optimize_ssn(ad_model, mx, my, fym, ydata, selwei, model_dat,
optimizer, params, do_print=True):
"""ad torch optimization of structural neural network"""
# parameters
rel = 0.0001 # ToDo: define globally
nr_conv_min = 5 # ToDo: define globally
sse = torch.DoubleTensor([0])
sse_old = torch.DoubleTensor([1])
nr_conv = 0
epoch = 0
while nr_conv < nr_conv_min:
sse_old = copy(sse)
ychat = ad_model(*params)
sse = sse_orig(mx, my, fym, ychat, ydata, selwei, model_dat) # forward
optimizer.zero_grad()
sse.backward(create_graph=True) # backward
optimizer.step()
if abs(sse - sse_old) / sse_old < rel:
nr_conv += 1
else:
nr_conv = 0
nrm = sum([torch.norm(param) for param in params]).detach().numpy()
if do_print:
print("epoch {:>4}, sse {:10f}, param norm {:10f}".format(epoch, sse.item(), nrm))
epoch += 1
return sse
def estimate_snn(model_dat, do_print=True):
"""estimate direct effects in identified structural form
using PyTorch AD automatic differentiation
forcasting y is done by reduced form since it is already solved for dy
structural form:
dy = my @ dy + mx @ dx
mx, my is a linear network of at most ndim + mdim layers of max of max dims max(ndim, mdim)
with identifiying restrictions idx, idy
reduced form:
dy = ex @ dx
ex is a linear network with one layer of dimension (ndim, mdim)
with restrictions edx
Estimating effects with automatic differentiation only works for DAG
"""
fym = torch.DoubleTensor(model_dat["fym"])
selwei = torch.DoubleTensor(model_dat["selwei"])
# start at theoretical direct effects
mx = torch.DoubleTensor(deepcopy(model_dat["mx_theo"]))
my = torch.DoubleTensor(deepcopy(model_dat["my_theo"]))
# define optimization parameters
ydata = torch.DoubleTensor(model_dat["ymcdat"]) # ymcdat
mx.requires_grad_(True)
my.requires_grad_(True)
params = [mx, my]
ad_model = StructuralNN(model_dat) # ychat
# Adam, Adadelta, Adagrad, AdamW, Adamax, RMSprop, Rprop
optimizer = torch.optim.Rprop(params)
if do_print:
print("\nEstimation of direct effects using a structural neural network \n"
"with regularization parameter alpha = {:10f}:".format(model_dat["alpha"]))
sse = optimize_ssn(ad_model, mx, my, fym, ydata, selwei, model_dat,
optimizer, params, do_print)
mx = mx.detach().numpy()
my = my.detach().numpy()
sse = sse.detach().numpy()
assert allclose(mx, mx * model_dat["idx"]), \
"idx identification restrictions not met:\n{}\n!=\n{}".format(mx, mx * model_dat["idx"])
assert allclose(my, my * model_dat["idy"]), \
"idy identification restrictions not met:\n{}\n!=\n{}".format(my, my * model_dat["idy"])
return mx, my, sse
def sse_bias(bias, bias_ind, model_dat):
"""sum of squared errors given modification indicator, Tikhonov not used"""
yhat = model_dat["model"](model_dat["xdat"], bias, bias_ind)
ymhat = model_dat["fym"] @ yhat
err = ymhat - model_dat["ymdat"]
sse = np.sum(err * err * diag(model_dat["selwei"]).reshape(-1, 1))
print("sse {:10f}, bias {:10f}".format(sse, float(bias)))
return sse
def optimize_biases(model_dat, bias_ind):
"""numerical optimize modification indicator for single equation"""
# optimizations parameters
bias_start = 0
method = 'SLSQP' # BFGS, SLSQP, Nelder-Mead, Powell, TNC, COBYLA, CG
print("\nEstimation of bias for {}:".format(model_dat["yvars"][bias_ind]))
out = minimize(sse_bias, bias_start, args=(bias_ind, model_dat), method=method)
bias = out.x
sse = out.fun
if hasattr(out, 'hess_inv'):
hess_i = inv(out.hess_inv)
print("Scalar Hessian from method {}.".format(method))
else:
hess_i = nd.Derivative(sse_bias, n=2)(bias, bias_ind, model_dat)
print("Scalar Hessian numerically.")
return bias, hess_i, sse
def sse_hess(mx, my, model_dat):
"""compute automatic Hessian of sse at given data and direct effects"""
fym = torch.DoubleTensor(model_dat["fym"])
ydata = torch.DoubleTensor(model_dat["ymcdat"])
selwei = torch.DoubleTensor(model_dat["selwei"])
def sse_orig_vec_alg(direct):
"""computes the ad target function sum of squared errors,
input as tensor vectors, yields Hessian in usual dimension of identified parameters"""
mx, my = directmat(direct, model_dat["idx"], model_dat["idy"])
ad_model = StructuralNN(model_dat)
ychat = ad_model(mx, my)
return sse_orig(mx, my, fym, ychat, ydata, selwei, model_dat)
direct = directvec(mx, my, model_dat["idx"], model_dat["idy"])
hessian = torch.autograd.functional.hessian(sse_orig_vec_alg, direct)
# symmetrize Hessian, such that numerically well conditioned
hessian = hessian.detach().numpy()
hessian = (hessian + hessian.T) / 2
return hessian
def compute_mediation_effects(mx, my, ex, ey, yvars, final_var):
"""compute mediation effects for final variable
use mediation matrix representation with final variable held fixed,
in addition, select corresponding total effects vectors on final var"""
# dimensions
ndim = mx.shape[0]
mdim = mx.shape[1]
jvar = list(yvars).index(final_var)
# corresponding total effects vectors on final var
exj = ex[jvar, :] # (mdim)
eyj = ey[jvar, :] # (ndim)
# mediation effects matrices with final var held fixed
eyx = (eyj.reshape(ndim, 1) @ ones((1, mdim))) * mx # (ndim x mdim)
eyy = (eyj.reshape(ndim, 1) @ ones((1, ndim))) * my # (ndim x ndim)
return exj, eyj, eyx, eyy
def tvals(eff, std):
"""compute t-values by element wise division of eff and std matrices"""
assert eff.shape == std.shape
if len(eff.shape) == 1: # vector
rows = eff.shape[0]
tvalues = empty(rows) * nan
for i in range(rows):
if std[i] != 0:
tvalues[i] = eff[i] / std[i]
if len(eff.shape) == 2: # matrix
rows, cols = eff.shape
tvalues = zeros((rows, cols))
for i in range(rows):
for j in range(cols):
if std[i, j] != 0:
tvalues[i, j] = eff[i, j] / std[i, j]
else:
tvalues[i, j] = nan
return tvalues
def compute_mediation_std(ex_hat_std, ey_hat_std, eyx, eyy, yvars, final_var):
"""compute mediation std"""
# dimensions
ndim = ex_hat_std.shape[0]
mdim = ex_hat_std.shape[1]
jvar = list(yvars).index(final_var)
exj_hat_std = ex_hat_std[jvar, :] # (mdim)
eyj_hat_std = ey_hat_std[jvar, :] # (ndim)
# construct matices of repeating rows
exj_hat_std_mat = tile(exj_hat_std, (ndim, 1)) # (ndim x mdim)
eyj_hat_std_mat = tile(eyj_hat_std, (ndim, 1)) # (ndim x ndim)
# column sums of mediation matrices
x_colsum = np.sum(eyx, axis=0)
y_colsum = np.sum(eyy, axis=0)
# normed mediation matrices by division by column sums,
# zero sum for varaibles w/o effect on others
# substituted by nan to avoid false interpretation
x_colsum[x_colsum==0] = nan
y_colsum[y_colsum==0] = nan
eyx_colnorm = zeros((ndim, mdim)) # (ndim x mdim)
eyx_colnorm[:] = nan
for j in range(mdim):
if not isnan(x_colsum[j]):
eyx_colnorm[:, j] = eyx[:, j] / x_colsum[j]
eyy_colnorm = zeros((ndim, ndim)) # (ndim x ndim)
eyy_colnorm[:] = nan
for j in range(ndim):
if not isnan(y_colsum[j]):
eyy_colnorm[:, j] = eyy[:, j] / y_colsum[j]
# mediation std matrices
eyx_hat_std = exj_hat_std_mat * eyx_colnorm # (ndim x mdim)
eyy_hat_std = eyj_hat_std_mat * eyy_colnorm # (ndim x ndim)
return exj_hat_std, eyj_hat_std, eyx_hat_std, eyy_hat_std
def directmat_alg(direct, idx, idy):
"""algebraic direct effect matrices column-wise
from direct effects vector and id matrices"""
# dimensions
ndim = idx.shape[0]
mdim = idx.shape[1]
qydim = count_nonzero(idy)
# compute direct effects matrices
my = zeros((ndim, ndim))
my.T[idy.T == 1] = direct[0:qydim]
mx = zeros((ndim, mdim))
mx.T[idx.T == 1] = direct[qydim:]
return mx, my
def directmat(direct, idx, idy):
"""automatic direct effects matrices column-wise
from direct effects vector and id matrices"""
# dimensions
ndim = idx.shape[0]
mdim = idx.shape[1]
# compute direct effects matrices
my = torch.DoubleTensor(zeros((ndim, ndim)))
mx = torch.DoubleTensor(zeros((ndim, mdim)))
k = 0
for i in range(ndim):
for j in range(ndim):
if idy[i, j] == 1:
my[i, j] = direct[k]
k += 1
for i in range(ndim):
for j in range(mdim):
if idx[i, j] == 1:
mx[i, j] = direct[k]
k += 1
return mx, my
def directvec_alg(mx, my, idx, idy):
"""algebraic direct effects vector column-wise
from direct effects matrices and id matrices"""
directy = my.T[idy.T == 1]
directx = mx.T[idx.T == 1]
direct = concatenate((directy, directx), axis=0)
return direct
def directvec(mx, my, idx, idy):
"""automatic direct effects vector column-wise
from direct effects matrices and id matrices"""
# dimensions
ndim = idx.shape[0]
mdim = idx.shape[1]
qydim = count_nonzero(idy)
qxdim = count_nonzero(idx)
# compute direct effects vector
direct = torch.DoubleTensor(zeros(qydim + qxdim))
k = 0
for i in range(ndim):
for j in range(ndim):
if idy[i, j] == 1:
direct[k] = my[i, j]
k += 1
for i in range(ndim):
for j in range(mdim):
if idx[i, j] == 1:
direct[k] = mx[i, j]
k += 1
return direct
def total_from_direct(direct, idx, idy, edx, edy):
"""construct total effects vector from direct effects vector and id and ed matrices"""
mx, my = directmat_alg(direct, idx, idy)
ex, ey = total_effects_alg(mx, my, edx, edy)
effects = directvec_alg(ex, ey, edx, edy)
return effects
def digital(mat):
"""transform a matrix or vector to digital matrix,
elements are equal to one if original element is unequal zero, and zero otherwise"""
if len(mat.shape) == 1: # vector
rows = mat.shape[0]
mat_digital = zeros(rows)
for i in range(rows):
if mat[i] != 0:
mat_digital[i] = 1
if len(mat.shape) == 2: # matrix
rows = mat.shape[0]
cols = mat.shape[1]
mat_digital = zeros((rows, cols))
for i in range(rows):
for j in range(cols):
if mat[i, j] != 0:
mat_digital[i, j] = 1
return mat_digital
def print_output(model_dat, estimate_dat, indiv_dat):
"""print theoretical and estimated values to output file"""
# print output file
stdout = sys.stdout
fha = open(model_dat["dir_path"] + "output.txt", 'w')
sys.stdout = fha
# model variables
yx_vars = (model_dat["yvars"], model_dat["xvars"])
yy_vars = (model_dat["yvars"], model_dat["yvars"])
#xyvars = concatenate((model_dat["xvars"], model_dat["yvars"]), axis=0)
# compute dataframe strings for printing
if model_dat["estimate_bias"]:
biases = concatenate(
(estimate_dat["biases"].reshape(1, -1),
estimate_dat["biases_std"].reshape(1, -1),
(estimate_dat["biases"] / estimate_dat["biases_std"]).reshape(1, -1)))
biases_dfstr = DataFrame(biases, ("biases", "std", "t-values"),
model_dat["yvars"]).to_string()
mx_theo_dfstr = DataFrame(model_dat["mx_theo"], *yx_vars).to_string()
my_theo_dfstr = DataFrame(model_dat["my_theo"], *yy_vars).to_string()
ex_theo_dfstr = DataFrame(model_dat["ex_theo"], *yx_vars).to_string()
ey_theo_dfstr = DataFrame(model_dat["ey_theo"], *yy_vars).to_string()
eyx_theo_dfstr = | DataFrame(model_dat["eyx_theo"], *yx_vars) | pandas.DataFrame |
import pathlib
import argparse
import datetime
import numpy as np
import pandas as pd
def create_regression_data(shape_data, distance_data):
"""Combines the shape and distance data into a single file for regressions.
Args:
shape_data: a csv file containing shape data
distance_data: a csv file containing distance data
Returns:
pandas.DataFrame: contains shape data
"""
shape_data = pd.read_csv(shape_data, low_memory=True)
distance_data = pd.read_csv(distance_data, low_memory=True)
regression_data = shape_data \
.merge(distance_data, on="shape_id") \
.loc[:, ["shape_id", "route_id", "route_type", "route_color", "times_taken", "distance"]]
return regression_data
def collect_shape_data(gtfs_dir):
"""Calculate the number of times a shape (line on a map) is travelled.
Appends some additional information about the route that the shape belongs to.
Args:
gtfs_dir: the directory where the GTFS file is extracted
Returns:
pandas.DataFrame: contains shape data
"""
gtfs_dir = pathlib.Path(gtfs_dir)
service_days = calculate_service_days(gtfs_dir)
trips = pd.read_csv(gtfs_dir / 'trips.txt', index_col=2)
routes = pd.read_csv(gtfs_dir / 'routes.txt', index_col=0)
route_id_diffs = trips \
.groupby('shape_id') \
.aggregate({'route_id': [min, max]})
if any(route_id_diffs[('route_id', 'min')] != route_id_diffs[('route_id', 'max')]):
raise ValueError("Shape ids must uniquely identify route_ids")
route_info = trips \
.join(service_days, on="service_id", how="left") \
.groupby(["shape_id"]) \
.aggregate({'days': sum, 'route_id': 'first'}) \
.rename(columns={'days': 'times_taken'}) \
.join(
routes[['route_short_name', 'route_type', 'route_color']],
on="route_id", how="left"
) \
.reset_index()
return route_info
def calculate_service_days(gtfs_dir):
"""Calculate the number of active days for each service.
Args:
gtfs_dir: the directory where the GTFS file is extracted
Returns:
pandas.DataFrame: contains day counts by service_id
"""
gtfs_dir = pathlib.Path(gtfs_dir)
calendar = pd.read_csv(gtfs_dir / 'calendar.txt', index_col=0)
calendar_dates = pd.read_csv(gtfs_dir / 'calendar_dates.txt')
validity_weeks = calculate_validity_weeks(calendar)
regular_number_of_days = calendar \
.loc[:, ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']] \
.sum(axis=1) \
* validity_weeks
exception_key = pd.DataFrame({
"exception_type": [1, 2],
"day_diff": [1, -1]
})
irregular_number_of_days = calendar_dates \
.merge(exception_key, on="exception_type", how="left") \
.groupby("service_id") \
.sum()
number_of_days = regular_number_of_days \
.to_frame(name="regular_days") \
.join(irregular_number_of_days[["day_diff"]], how="outer") \
.fillna(value=0) \
.assign(days=lambda df: df.regular_days + df.day_diff)
if number_of_days.days.min() < 0:
raise ValueError("Number of days a service operates on cannot be negative.")
return number_of_days
def calculate_validity_weeks(calendar):
"""Calculate the validity of the calendar in weeks.
Args:
calendar: a pandas.DataFrame containing the contents of calendar.txt
Returns:
int: the validity of the timetable in weeks
"""
if calendar.start_date.min() != calendar.start_date.max() or \
calendar.end_date.min() != calendar.end_date.max():
raise ValueError("Non-uniform timetable validities are not handled yet.")
start_date = datetime.datetime.strptime(
str(calendar.start_date[0]),
'%Y%m%d'
)
end_date = datetime.datetime.strptime(
str(calendar.end_date[0]),
'%Y%m%d'
)
validity_days = (end_date - start_date).days + 1
if validity_days % 7 != 0:
raise ValueError("Non-integer weeks are not handled yet.")
return validity_days // 7
def generate_plot_data(gtfs_dir, shape_data):
"""Generates a dataset suitable for line plots using datashader.
Args:
gtfs_dir: the directory where the GTFS file is extracted
shape_data: additional shape data that is needed for the plotting
Returns:
pandas.DataFrame: a DataFrame that is used for line plots
"""
gtfs_dir = pathlib.Path(gtfs_dir)
shapes = pd.read_csv(gtfs_dir / 'shapes.txt')
plotting_data = insert_empty_rows(shapes, 'shape_id') \
.merge(shape_data, on='shape_id', how='left')
return plotting_data
def insert_empty_rows(df, by):
""" Inserts a row filled with NaNs between each chunk of the dataframe
separated by a variable. The resulting dataset is suitable for line plots.
Args:
df: a pandas.DataFrame
by: a column of `df` that will be used for the grouping
Returns:
pandas.DataFrame: the same as the input `df` with empty lines inserted
between groups
"""
df_parts = []
for level, df_part in df.groupby(by):
empty = pd.DataFrame(
[[level if colname == by else np.NaN for colname in df.columns]],
columns=df.columns
)
df_parts.append(df_part.append(empty))
return | pd.concat(df_parts) | pandas.concat |
'''
@author: <NAME>
@date: 7/16/2019
@title: HNSCC Plate Mapper
Takes a csv file with [lab_id, inhibitor, conc, avg_opt_density]
Assumes normalization steps have already been applied to dataset
XXX
Run the file by
$ python probit_calc.py ./path/to/data.csv
results are saved to ./data/single_drug_probit_fit_results.csv
TODO
- Perfect separation fails, need work around
'''
import sys
import os
import pandas as pd
import statsmodels.api as sm
import numpy as np
import seaborn as sbn
from matplotlib import pyplot as plt
pd.options.display.width = 0
# comment this to False - will plot failures
DIAGNOSTICS = True
if __name__ == '__main__':
path = sys.argv[1]
print('Data path: %s' %path)
data = pd.read_csv(path, sep=',')
print(data.head())
failures = []
res = {x:[] for x in ['lab_id', 'inhibitor','beta0', 'beta1', 'auc']}
i = 0
for patient in set(data['lab_id'].values):
pat_dat = data[data['lab_id'] == patient]
print('Fitting probit | n = %d | lab_id = %s | n_failures = %d' %(i, patient, len(failures) ), end='\r')
for inhib in set(pat_dat['inhibitor'].values):
i+=1
df = pat_dat[pat_dat['inhibitor'] == inhib]
print()
print('shape: %s' %str(df.shape))
print('inhib: %s' %inhib)
assert df.shape[0] % 7 == 0, 'wrong number of doses, must be multiple of 7'
try:
print(df.head())
print(df['conc'].values)
# 'conc' variables haven't separated combination data yet, so they are stored as strings.
x = sm.add_constant( np.log10( df['conc'].values ))
y = df['avg.opt.density'].values
pr = sm.GLM(y, x, family=sm.families.Binomial(link=sm.families.links.probit()))
glm_res = pr.fit(disp=False)
# AUC calculation -----------------------------------------------------
# left rectangle auc estimate
delta = 0.001
x2 = np.arange(np.log10(min(df['conc'].values)), np.log10(max(df['conc'].values)), delta)
yhat = glm_res.predict(sm.add_constant(x2))
auc = np.sum(yhat*delta)
if (DIAGNOSTICS):
plt.figure()
plt.plot(x2, yhat, 'r-', label='probit_fit')
plt.plot(x,y, 'bo', label='replicates')
plt.legend()
plt.show()
# beta0 = intercept
# beta1 = slope
(beta0,beta1) = glm_res.params
# update results
[res[var].append(val) for var,val in zip(['lab_id', 'inhibitor','beta0', 'beta1', 'auc'], [patient, inhib, beta0 ,beta1 , auc])]
except:
failures.append( (patient, inhib) )
[res[var].append(val) for var,val in zip(['lab_id', 'inhibitor','beta0', 'beta1', 'auc'], [patient, inhib, 'NA' ,'NA' , 'NA'])]
if DIAGNOSTICS:
print('FAILURE: %s, %s' %(patient, inhib))
print(df.head(7))
f, ax = plt.subplots(1,1, figsize = (10,10))
ax.set_title('FAILURE: %s, %s' %(patient, inhib))
#sbn.scatterplot(x=x2, y=yhat , ax=ax)
plt.xscale('log')
sbn.scatterplot(x=np.log10(df['conc'].values), y=df['avg.opt.density'].values, ax=ax)
plt.show()
print('Failures [%d]: %r' %(len(failures),failures))
res = | pd.DataFrame(res) | pandas.DataFrame |
import json
import math
import unittest
from monty.json import MontyEncoder
from unittest import TestCase
from pandas import DataFrame, MultiIndex
from pymatgen.core.structure import IStructure
from pymatgen.core import Composition, Lattice, Structure, Element, SETTINGS
from matminer.featurizers.conversions import (
StrToComposition, StructureToComposition, StructureToIStructure,
DictToObject, JsonToObject, StructureToOxidStructure,
CompositionToOxidComposition, CompositionToStructureFromMP)
class TestConversions(TestCase):
def test_conversion_overwrite(self):
# Test with overwrite
d = {'comp_str': ["Fe2", "MnO2"]}
df = DataFrame(data=d)
stc = StrToComposition(target_col_id='comp_str', overwrite_data=False)
with self.assertRaises(ValueError):
df = stc.featurize_dataframe(df, 'comp_str', inplace=True)
with self.assertRaises(ValueError):
df = stc.featurize_dataframe(df, 'comp_str', inplace=False)
stc = StrToComposition(target_col_id='comp_str', overwrite_data=True)
dfres_ipt = df.copy()
stc.featurize_dataframe(dfres_ipt, 'comp_str', inplace=True)
self.assertListEqual(dfres_ipt.columns.tolist(), ["comp_str"])
dfres_ipf = stc.featurize_dataframe(df, 'comp_str', inplace=False)
self.assertListEqual(dfres_ipf.columns.tolist(), ["comp_str"])
def test_str_to_composition(self):
d = {'comp_str': ["Fe2", "MnO2"]}
df = DataFrame(data=d)
df = StrToComposition().featurize_dataframe(df, 'comp_str')
self.assertEqual(df["composition"].tolist(),
[Composition("Fe2"), Composition("MnO2")])
stc = StrToComposition(reduce=True, target_col_id='composition_red')
df = stc.featurize_dataframe(df, 'comp_str')
self.assertEqual(df["composition_red"].tolist(),
[Composition("Fe"), Composition("MnO2")])
def test_structure_to_composition(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Si"] * 2, coords)
df = DataFrame(data={'structure': [struct]})
stc = StructureToComposition()
df = stc.featurize_dataframe(df, 'structure')
self.assertEqual(df["composition"].tolist()[0], Composition("Si2"))
stc = StructureToComposition(reduce=True,
target_col_id='composition_red')
df = stc.featurize_dataframe(df, 'structure')
self.assertEqual(df["composition_red"].tolist()[0], Composition("Si"))
def test_dict_to_object(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Si"] * 2, coords)
d = {'structure_dict': [struct.as_dict(), struct.as_dict()]}
df = DataFrame(data=d)
dto = DictToObject(target_col_id='structure')
df = dto.featurize_dataframe(df, 'structure_dict')
self.assertEqual(df["structure"].tolist()[0], struct)
self.assertEqual(df["structure"].tolist()[1], struct)
# test dynamic target_col_id setting
df = DataFrame(data=d)
dto = DictToObject()
df = dto.featurize_dataframe(df, 'structure_dict')
self.assertEqual(df["structure_dict_object"].tolist()[0], struct)
self.assertEqual(df["structure_dict_object"].tolist()[1], struct)
def test_json_to_object(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Si"] * 2, coords)
struct_json = json.dumps(struct, cls=MontyEncoder)
d = {'structure_json': [struct_json]}
df = DataFrame(data=d)
jto = JsonToObject(target_col_id='structure')
df = jto.featurize_dataframe(df, 'structure_json')
self.assertEqual(df["structure"].tolist()[0], struct)
# test dynamic target_col_id setting
df = DataFrame(data=d)
jto = JsonToObject()
df = jto.featurize_dataframe(df, 'structure_json')
self.assertEqual(df["structure_json_object"].tolist()[0], struct)
def test_structure_to_oxidstructure(self):
cscl = Structure(Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl", "Cs"], [[0.45, 0.5, 0.5], [0, 0, 0]])
d = {'structure': [cscl]}
df = DataFrame(data=d)
sto = StructureToOxidStructure()
df = sto.featurize_dataframe(df, 'structure')
self.assertEqual(df["structure_oxid"].tolist()[0][0].specie.oxi_state,
-1)
self.assertEqual(df["structure_oxid"].tolist()[0][1].specie.oxi_state,
+1)
sto = StructureToOxidStructure(target_col_id='structure_oxid2',
oxi_states_override={"Cl": [-2],
"Cs": [+2]})
df = sto.featurize_dataframe(df, 'structure')
self.assertEqual(df["structure_oxid2"].tolist()[0][0].specie.oxi_state,
-2)
self.assertEqual(df["structure_oxid2"].tolist()[0][1].specie.oxi_state,
+2)
# original is preserved
self.assertEqual(df["structure"].tolist()[0][0].specie, Element("Cl"))
# test in-place
sto = StructureToOxidStructure(target_col_id=None, overwrite_data=True)
df = sto.featurize_dataframe(df, 'structure')
self.assertEqual(df["structure"].tolist()[0][0].specie.oxi_state, -1)
# test error handling
test_struct = Structure([5, 0, 0, 0, 5, 0, 0, 0, 5], ['Sb', 'F', 'O'],
[[0, 0, 0], [0.2, 0.2, 0.2], [0.5, 0.5, 0.5]])
df = DataFrame(data={'structure': [test_struct]})
sto = StructureToOxidStructure(return_original_on_error=False,
max_sites=2)
self.assertRaises(ValueError, sto.featurize_dataframe, df,
'structure')
# check non oxi state structure returned correctly
sto = StructureToOxidStructure(return_original_on_error=True,
max_sites=2)
df = sto.featurize_dataframe(df, 'structure')
self.assertEqual(df["structure_oxid"].tolist()[0][0].specie,
Element("Sb"))
def test_composition_to_oxidcomposition(self):
df = DataFrame(data={"composition": [Composition("Fe2O3")]})
cto = CompositionToOxidComposition()
df = cto.featurize_dataframe(df, 'composition')
self.assertEqual(df["composition_oxid"].tolist()[0],
Composition({"Fe3+": 2, "O2-": 3}))
# test error handling
df = DataFrame(data={"composition": [Composition("Fe2O3")]})
cto = CompositionToOxidComposition(
return_original_on_error=False, max_sites=2)
self.assertRaises(ValueError, cto.featurize_dataframe, df,
'composition')
# check non oxi state structure returned correctly
cto = CompositionToOxidComposition(
return_original_on_error=True, max_sites=2)
df = cto.featurize_dataframe(df, 'composition')
self.assertEqual(df["composition_oxid"].tolist()[0],
Composition({"Fe": 2, "O": 3}))
def test_to_istructure(self):
cscl = Structure(Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl", "Cs"], [[0.45, 0.5, 0.5], [0, 0, 0]])
df = DataFrame({"structure": [cscl]})
# Run the conversion
sti = StructureToIStructure()
df = sti.featurize_dataframe(df, 'structure')
# Make sure the new structure is an IStructure, and equal
# to the original structure
self.assertIsInstance(df["istructure"][0], IStructure)
self.assertEqual(df["istructure"][0], df["structure"][0])
def test_conversion_multiindex(self):
d = {'comp_str': ["Fe2", "MnO2"]}
df_1lvl = DataFrame(data=d)
df_1lvl = StrToComposition().featurize_dataframe(
df_1lvl, 'comp_str', multiindex=True)
self.assertEqual(df_1lvl[("StrToComposition", "composition")].tolist(),
[Composition("Fe2"), Composition("MnO2")])
df_2lvl = | DataFrame(data=d) | pandas.DataFrame |
# coding: utf-8
"""ๅบไบHDFๆไปถ็ๆฐๆฎๅบ"""
import pandas as pd
import numpy as np
import os
import warnings
from multiprocessing import Lock
from ..utils.datetime_func import Datetime2DateStr, DateStr2Datetime
from ..utils.tool_funcs import ensure_dir_exists
from ..utils.disk_persist_provider import DiskPersistProvider
from .helpers import handle_ids, FIFODict
from pathlib import Path
from FactorLib.utils.tool_funcs import is_non_string_iterable
pd.options.compute.use_numexpr = True
lock = Lock()
warnings.simplefilter('ignore', category=FutureWarning)
def append_along_index(df1, df2):
df1, df2 = df1.align(df2, axis='columns')
new = pd.DataFrame(np.vstack((df1.values, df2.values)),
columns=df1.columns,
index=df1.index.append(df2.index))
new.sort_index(inplace=True)
return new
def auto_increase_keys(_dict, keys):
if _dict:
max_v = max(_dict.values())
else:
max_v = 0
for key in keys:
if key not in _dict:
max_v += 1
_dict[key] = max_v
return _dict
class H5DB(object):
def __init__(self, data_path, max_cached_files=30):
self.data_path = str(data_path)
self.feather_data_path = os.path.abspath(self.data_path+'/../feather')
self.csv_data_path = os.path.abspath(self.data_path+'/../csv')
self.data_dict = None
self.cached_data = FIFODict(max_cached_files)
self.max_cached_files = max_cached_files
# self._update_info()
def _update_info(self):
factor_list = []
for root, subdirs, files in os.walk(self.data_path):
relpath = "/%s/"%os.path.relpath(root, self.data_path).replace("\\", "/")
for file in files:
if file.endswith(".h5"):
factor_list.append([relpath, file[:-3]])
self.data_dict = pd.DataFrame(
factor_list, columns=['path', 'name'])
def _read_h5file(self, file_path, key):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
data = pd.read_hdf(file_path, key)
except KeyError:
data = pd.read_hdf(file_path, 'data')
finally:
lock.release()
# update at 2020.02.15: surpport wide dataframe
columns_mapping = self._read_columns_mapping(file_path)
if not columns_mapping.empty:
data.rename(
columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()),
inplace=True
)
if self.max_cached_files > 0:
self.cached_data[file_path] = data
return data
def _read_columns_mapping(self, file_path):
try:
data = pd.read_hdf(file_path, 'column_name_mapping')
except KeyError:
data = pd.Series()
return data
def _normalize_columns(self, input, column_mapping):
return column_mapping[column_mapping.index.isin(input)].tolist()
def _save_h5file(self, data, file_path, key,
complib='blosc', complevel=9,
mode='w', **kwargs):
try:
lock.acquire()
# update at 2020.02.15: surpport wide dataframe
if data.shape[1] > 1000:
columns_mapping = {x:y for x, y in zip(data.columns, range(data.shape[1]))}
data2 = data.rename(columns=columns_mapping)
else:
data2 = data
columns_mapping = {}
with pd.HDFStore(file_path, mode=mode, complevel=complevel,
complib=complib) as f:
f.put(key, data2, **kwargs)
f.put('column_name_mapping', | pd.Series(columns_mapping) | pandas.Series |
#/usr/bin/env python3
import numpy as np
import pandas as pd
def series():
#podemos hacer arrays numpy de tiempo np.array('2019-10-12',dtype=np.datetime64)
#para ello contamos con el paquete pandas de python
index= | pd.DatetimeIndex(['2000-10-12','2000-10-24','2001-12-12']) | pandas.DatetimeIndex |
#!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from __future__ import print_function, absolute_import
import numpy as np
import pandas as pd
from collections import defaultdict, Counter
from itertools import combinations, product
def _chi_squared(count_matrix: np.ndarray, n_obs: int) -> float:
"""
Computes Chi-squared when given a contingency table
"""
row_sums = np.tile(np.sum(count_matrix, axis=1), (count_matrix.shape[1], 1)).T
col_sums = np.tile(np.sum(count_matrix, axis=0), (count_matrix.shape[0], 1))
return np.sum(
np.square(count_matrix - row_sums * col_sums / n_obs)
/ (row_sums * col_sums / n_obs)
)
def _cramers_v(cat1: np.ndarray, cat2: np.ndarray) -> float:
"""
Calculates the cramers v of two numpy arrays.
"""
keep_cat1 = ~pd.isnull(cat1)
cat1_no_nan = cat1[keep_cat1]
cat2_no_nan = cat2[keep_cat1]
keep_cat2 = ~pd.isnull(cat2_no_nan)
cat1_no_nan = cat1_no_nan[keep_cat2]
cat2_no_nan = cat2_no_nan[keep_cat2]
n = len(cat1_no_nan)
if n == 1:
return 0
contingency_table, r, k = _count_occurrence(cat1_no_nan, cat2_no_nan)
if r == 0:
return 0.0000
chi2 = _chi_squared(contingency_table, n)
phi2 = chi2 / n
phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
rcorr = r - (np.square(r - 1)) / (n - 1)
kcorr = k - (np.square(k - 1)) / (n - 1)
denominator = min((kcorr - 1), (rcorr - 1))
if denominator == 0:
return np.nan
return np.sqrt(phi2corr / denominator)
def _list_to_dataframe(
name_list: list, corr_list: list, normal_form: bool
) -> pd.DataFrame:
corr_dict = defaultdict(dict)
for idx, corr in zip(name_list, corr_list):
row_name = idx[0]
col_name = idx[1]
corr_dict[row_name][col_name] = corr_dict[col_name][row_name] = round(corr, 4)
corr_dict[row_name][row_name] = corr_dict[col_name][col_name] = 1.0000
correlation_matrix = pd.DataFrame.from_dict(corr_dict).sort_index()
correlation_matrix = correlation_matrix.loc[:, correlation_matrix.index]
if normal_form:
data = []
for (col1, col2), corr in correlation_matrix.stack().iteritems():
data.append([col1, col2, round(corr, 4)])
return | pd.DataFrame(data, columns=["Column 1", "Column 2", "Value"]) | pandas.DataFrame |
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
from pandas import Timestamp
import numpy as np
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return pd.Series([1.0, 3.0, 2.1], index=times_30)
@pytest.fixture()
def expected_30(times_30):
return pd.Series([1.2, 2.8, 2.0], index=times_30)
@pytest.fixture()
def irradiance_30(times_30):
return pd.Series([1000.0, 950.0, 890.0], index=times_30)
def test_normalize_with_expected_power_uniform_frequency(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.0,
Timestamp('2020-01-01 12:30:00', freq='15T'): 1.0784313725490198,
Timestamp('2020-01-01 12:45:00', freq='15T'): 1.0612244897959184,
Timestamp('2020-01-01 13:00:00', freq='15T'): 1.0487804878048783}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_energy_option(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15, pv_input='energy')
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 5.714285714285714,
Timestamp('2020-01-01 12:30:00', freq='15T'): 4.705882352941177,
Timestamp('2020-01-01 12:45:00', freq='15T'): 3.5918367346938775,
Timestamp('2020-01-01 13:00:00', freq='15T'): 4.097560975609756}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_low_freq_pv(pv_30, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_30, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 0.9302325581395349,
Timestamp('2020-01-01 13:00:00', freq='30T'): 1.1333333333333333}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '30T'
expected_insol = pd.Series(
{ | Timestamp('2020-01-01 12:30:00', freq='30T') | pandas.Timestamp |
import os
# import tensorflow as tf
import math
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
from matplotlib.lines import Line2D
import matplotlib.patheffects as path_effects
import matplotlib as mpl
import cv2
import glob
from scipy.interpolate import CubicSpline
import scipy.interpolate
from scipy import signal
import scipy.stats as stats
import seaborn as sns
from sklearn.linear_model import LinearRegression
from gekko import GEKKO
import pywt
# import waymo dataset related modules
from waymo_open_dataset import dataset_pb2 as open_dataset
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
def show_camera_image(camera_image, camera_labels, layout, cmap=None):
# Show a camera image and the given camera labels (if avaliable)
ax = plt.subplot(*layout)
# Draw the camera labels.
for camera_labels in frame.camera_labels:
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name != camera_image.name:
continue
# Iterate over the individual labels.
for label in camera_labels.labels:
# Draw the object bounding box.
ax.add_patch(patches.Rectangle(
xy=(label.box.center_x - 0.5 * label.box.length,
label.box.center_y - 0.5 * label.box.width),
width=label.box.length,
height=label.box.width,
linewidth=1,
edgecolor='red',
facecolor='none'))
# Show the camera image.
frame_image = plt.imshow(tf.image.decode_jpeg(camera_image.image), cmap=cmap, animated=True)
plt.title(open_dataset.CameraName.Name.Name(camera_image.name))
plt.grid(False)
plt.axis('off')
return frame_image
def camera_video_generation():
img_array = []
for num in range(1, len(os.listdir('figure_save/temp_cam_pic/')) + 1):
image_filename = 'figure_save/temp_cam_pic/' + 'frame_' + str(num) + '.jpg'
img = cv2.imread(image_filename)
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
video_save_name = 'figure_save/cam_video/' + 'camera_video_segment_' + str(segment_id) + '.avi'
out = cv2.VideoWriter(video_save_name, cv2.VideoWriter_fourcc(*'DIVX'), 10, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
print('camera video made success')
# after making the video, delete all the frame jpgs
filelist = glob.glob(os.path.join('figure_save/temp_cam_pic/', "*.jpg"))
for f in filelist:
os.remove(f)
def frame_context_update(frame_in):
# collect environment context in this frame
frame_context_dict = {}
frame_context_dict['segment_id'] = segment_id
frame_context_dict['frame_label'] = frame_label
frame_context_dict['time_of_day'] = frame_in.context.stats.time_of_day
frame_context_dict['location'] = frame_in.context.stats.location
frame_context_dict['weather'] = frame_in.context.stats.weather
for count in frame_in.context.stats.laser_object_counts:
if count.type != 1: # note that 1 means vehicle object
continue
frame_context_dict['laser_veh_count'] = count.count
return frame_context_dict
def collect_lidar_veh_label(single_lidar_label, row_dict, ego_dict, ego_pose):
# this function extract the information of a single object (Lidar label)
# note that the original position and heading in label is in local coordinate
# single_lidar_label is from lidar label from original data
# row_dict is an initialized dictionary that will be filled
# global unique object_id
row_dict['obj_id'] = single_lidar_label.id
row_dict['local_center_x'] = single_lidar_label.box.center_x
row_dict['local_center_y'] = single_lidar_label.box.center_y
row_dict['local_center_z'] = single_lidar_label.box.center_z
# we need to use ego_dict and ego_pose to transform local label position to global position
# (in vehicle frame), it needs to be transformed to global frame
# make ego_pose in the form of transformation matrix
trans_matrix = np.reshape(np.array(ego_pose), (4, 4))
# print(trans_matrix)
local_pos_matrix = np.reshape(
np.array([row_dict['local_center_x'], row_dict['local_center_y'], row_dict['local_center_z'], 1]), (4, 1))
# print(local_pos_matrix)
label_global_pos = np.matmul(trans_matrix, local_pos_matrix)
# print(label_global_pos)
row_dict['global_center_x'] = label_global_pos[0][0]
row_dict['global_center_y'] = label_global_pos[1][0]
row_dict['global_center_z'] = label_global_pos[2][0]
row_dict['length'] = single_lidar_label.box.length
row_dict['width'] = single_lidar_label.box.width
row_dict['height'] = single_lidar_label.box.height
frame_ego_heading = ego_dict['heading']
row_dict['heading'] = single_lidar_label.box.heading + frame_ego_heading
row_dict['speed_x'] = single_lidar_label.metadata.speed_x
row_dict['speed_y'] = single_lidar_label.metadata.speed_y
row_dict['accel_x'] = single_lidar_label.metadata.accel_x
row_dict['accel_y'] = single_lidar_label.metadata.accel_y
# angular speed remains to be calculated
row_dict['angular_speed'] = 0
return row_dict
def veh_trj_collect(frame_in):
# this funtion collects all Lidar object information in current frame
# collect environment context in this frame
frame_context_dict = frame_context_update(frame)
# print(frame_context_dict)
ego_row_dict = frame_context_dict.copy() # add context info to every row
# collect ego (AV) vehicle's timestamp, position and speed
ego_row_dict['obj_type'] = 'vehicle'
ego_row_dict['obj_id'] = 'ego'
ego_row_dict['global_time_stamp'] = frame_in.timestamp_micros # unix time (in micro seconds)
# time referenced to segment start time
ego_row_dict['local_time_stamp'] = (frame_in.timestamp_micros - segment_start_time) / float(1000000) # in seconds
# self driving car's (sdc) global position and heading (yaw, pitch, roll)
sdc_pose = frame_in.pose.transform # the transformation matrix
# print(sdc_pose)
frame_images = frame_in.images
for image in frame_images:
# print(image.velocity)
ego_speed_x = image.velocity.v_x
ego_speed_y = image.velocity.v_y
ego_angular_speed = image.velocity.w_z
# only get speed from the front camera
break
# print(image.pose)
# ego_velocity = frame_in.images
# ego vehicle's local position will be 0, because itself is the origin
ego_row_dict['local_center_x'] = 0
ego_row_dict['local_center_y'] = 0
ego_row_dict['local_center_z'] = 0
# ego vehicle's global position is extracted from the transformation matrix
ego_row_dict['global_center_x'] = sdc_pose[3]
ego_row_dict['global_center_y'] = sdc_pose[7]
ego_row_dict['global_center_z'] = sdc_pose[11]
# note that the actual model of AV is 2019 Chrysler Pacifica Hybrid
# the dimensions for AV is length 5.18m, width 2.03m, height 1.78m
ego_row_dict['length'] = 5.18
ego_row_dict['width'] = 2.03
ego_row_dict['height'] = 1.78
ego_row_dict['heading'] = math.atan2(sdc_pose[4], sdc_pose[0])
ego_row_dict['speed_x'] = ego_speed_x
ego_row_dict['speed_y'] = ego_speed_y
# accleration remains to be calculated
ego_row_dict['accel_x'] = 0
ego_row_dict['accel_y'] = 0
ego_row_dict['angular_speed'] = ego_angular_speed
# print(ego_row_dict)
# add to final file
all_segment_all_frame_all_object_info.append(ego_row_dict)
# collect vehicle's info in the lidar label
for lidar_label in frame_in.laser_labels:
# labe object type and its correspoding
# TYPE_UNKNOWN = 0;
# TYPE_VEHICLE = 1;
# TYPE_PEDESTRIAN = 2;
# TYPE_SIGN = 3;
# TYPE_CYCLIST = 4;
if lidar_label.type in [1, 2, 4]:
temp_row_dict = ego_row_dict.copy()
if lidar_label.type == 1:
temp_row_dict['obj_type'] = 'vehicle'
elif lidar_label.type == 4:
temp_row_dict['obj_type'] = 'bicycle'
else:
temp_row_dict['obj_type'] = 'pedestrian'
temp_row_dict = collect_lidar_veh_label(lidar_label, temp_row_dict, ego_row_dict, sdc_pose)
# add to final file
all_segment_all_frame_all_object_info.append(temp_row_dict)
def final_trj_result_format():
# format the final output
global all_segment_all_frame_all_object_info_pd
all_segment_all_frame_all_object_info_pd['local_time_stamp'] = all_segment_all_frame_all_object_info_pd[
'local_time_stamp'].map('{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['local_center_x'] = all_segment_all_frame_all_object_info_pd[
'local_center_x'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['local_center_y'] = all_segment_all_frame_all_object_info_pd[
'local_center_y'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['local_center_z'] = all_segment_all_frame_all_object_info_pd[
'local_center_z'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['global_center_x'] = all_segment_all_frame_all_object_info_pd[
'global_center_x'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['global_center_y'] = all_segment_all_frame_all_object_info_pd[
'global_center_y'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['global_center_z'] = all_segment_all_frame_all_object_info_pd[
'global_center_z'].map('{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['length'] = all_segment_all_frame_all_object_info_pd['length'].map(
'{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['width'] = all_segment_all_frame_all_object_info_pd['width'].map(
'{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['height'] = all_segment_all_frame_all_object_info_pd['height'].map(
'{:.2f}'.format)
all_segment_all_frame_all_object_info_pd['heading'] = all_segment_all_frame_all_object_info_pd['heading'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['speed_x'] = all_segment_all_frame_all_object_info_pd['speed_x'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['speed_y'] = all_segment_all_frame_all_object_info_pd['speed_y'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['accel_x'] = all_segment_all_frame_all_object_info_pd['accel_x'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['accel_y'] = all_segment_all_frame_all_object_info_pd['accel_y'].map(
'{:.4f}'.format)
all_segment_all_frame_all_object_info_pd['angular_speed'] = all_segment_all_frame_all_object_info_pd[
'angular_speed'].map('{:.4f}'.format)
def plot_top_view_ani_with_lidar_label(trj_in, seg_id_in, frame_id_in):
# this function plots one single frame of the top view video
# trj_in is a pandas with three columns(obj_id, frame_label, local_time_stamp, global_center_x, global_center_y, length, width, heading)
# trj_in is all the trajectories within one segment
# seg_id_in is the current segment id
trj_in['global_center_x'] = trj_in['global_center_x'] - trj_in['global_center_x'].min() #่ฟ่กๅๆ ๅ็น็ไธไธชๅนณ็งป
trj_in['global_center_y'] = trj_in['global_center_y'] - trj_in['global_center_y'].min()
unique_veh_id = pd.unique(trj_in['obj_id'])
plt.figure(figsize=(18, 13.5))
plt.figure()
plt.xlabel('global center x (m)', fontsize=10)
plt.ylabel('global center y (m)', fontsize=10)
plt.axis('square')
plt.xlim([trj_in['global_center_x'].min() - 1, trj_in['global_center_x'].max() + 1])
plt.ylim([trj_in['global_center_y'].min() - 1, trj_in['global_center_y'].max() + 1])
# max_range = max(trj_in['global_center_x'].max(), )
title_name = 'Segment ' + str(seg_id_in)
plt.title(title_name, loc='left')
plt.xticks(
np.arange(round(float(trj_in['global_center_x'].min())), round(float(trj_in['global_center_x'].max())), 10),
fontsize=5)
plt.yticks(
np.arange(round(float(trj_in['global_center_y'].min())), round(float(trj_in['global_center_y'].max())), 10),
fontsize=5)
ax = plt.gca()
# find out the global heading of ego vehicle first, use it to transform other vehicles' local heading to global heading
ego_veh_trj = trj_in.loc[trj_in['obj_id'] == 'ego', :]
ego_current_heading = ego_veh_trj.loc[ego_veh_trj['frame_label'] == frame_id, 'heading'].values[0]
# get all the trajectories until current frame
for signle_veh_id in unique_veh_id:
single_veh_trj = trj_in[trj_in['obj_id'] == signle_veh_id]
# print(single_veh_trj)
single_veh_trj = single_veh_trj[single_veh_trj['frame_label'] == frame_id_in]
# print(single_veh_trj)
if len(single_veh_trj) > 0:
ts = ax.transData
coords = [single_veh_trj['global_center_x'].iloc[0], single_veh_trj['global_center_y'].iloc[0]]
if single_veh_trj.iloc[0, 0] == 'ego':
veh_local_id = 0
temp_facecolor = 'red'
temp_alpha = 0.99
heading_angle = single_veh_trj['heading'].iloc[0] * 180 / np.pi
tr = mpl.transforms.Affine2D().rotate_deg_around(coords[0], coords[1], heading_angle) #ๅฏน่ฝฆ่พๆ็
ง่ชๅ่ง่ฟ่กๆ่ฝฌ
else:
# calculate vehicle's local id
veh_id_match_temp = veh_name_id_match[veh_name_id_match['obj_id'] == signle_veh_id]
if single_veh_trj['obj_type'].iloc[0] == 'vehicle':
# only vehicle has a local id
veh_local_id = veh_id_match_temp['local_id'].iloc[0]
if single_veh_trj['obj_type'].iloc[0] == 'vehicle':
temp_facecolor = 'blue'
elif single_veh_trj['obj_type'].iloc[0] == 'bicycle':
temp_facecolor = 'green'
else:
temp_facecolor = 'magenta'
temp_alpha = 0.5
heading_angle = single_veh_trj['heading'].iloc[0] * 180 / np.pi
# transform for other vehicles, note that the ego global heading should be added to current local heading
tr = mpl.transforms.Affine2D().rotate_deg_around(coords[0], coords[1], heading_angle)
t = tr + ts
# note that exact xy needs to to calculated
veh_length = single_veh_trj['length'].iloc[0]
veh_width = single_veh_trj['width'].iloc[0]
ax.add_patch(patches.Rectangle(
xy=(single_veh_trj['global_center_x'].iloc[0] - 0.5 * veh_length,
single_veh_trj['global_center_y'].iloc[0] - 0.5 * veh_width),
width=veh_length,
height=veh_width,
linewidth=0.1,
facecolor=temp_facecolor,
edgecolor='black',
alpha=temp_alpha,
transform=t))
# add vehicle local id for only vehicle object
if single_veh_trj['obj_type'].iloc[0] == 'vehicle':
temp_text = plt.text(single_veh_trj['global_center_x'].iloc[0],
single_veh_trj['global_center_y'].iloc[0], str(veh_local_id), style='italic',
weight='heavy', ha='center', va='center', color='white', rotation=heading_angle,
size=3)
temp_text.set_path_effects(
[path_effects.Stroke(linewidth=1, foreground='black'), path_effects.Normal()])
trj_save_name = 'figure_save/temp_top_view_figure/top_view_segment_' + str(seg_id_in) + '_frame_' + str(
frame_id_in) + '_trajectory.jpg'
plt.savefig(trj_save_name, dpi=600)
plt.close('all')
def top_view_video_generation():
# this function generates one top view video based on top view figures from one segment
img_array = []
for num in range(1, len(os.listdir('figure_save/temp_top_view_figure/')) + 1):
image_filename = 'figure_save/temp_top_view_figure/' + 'top_view_segment_' + str(
single_seg_id) + '_frame_' + str(num) + '_trajectory.jpg'
img = cv2.imread(image_filename)
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
video_save_name = 'figure_save/top_view_video/' + 'animation_top_view_segment_' + str(single_seg_id) + '.avi'
out = cv2.VideoWriter(video_save_name, cv2.VideoWriter_fourcc(*'DIVX'), 10, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
print('top view video made success')
# after making the video, delete all the frame jpgs
filelist = glob.glob(os.path.join('figure_save/temp_top_view_figure/', "*.jpg"))
for f in filelist:
os.remove(f)
def cumulated_dis_cal(coord_series_in, segment_id_in, veh_id_in, start_time_in):
# this function calculate the cumulated distance based on the given global coordinates,
# input coord_series_in: ['global_center_x', 'global_center_y', 'speed_x', 'speed_y']
# output coord_series_in: ['global_center_x', 'global_center_y', 'speed_x', 'speed_y', 'cumu_dis', 'speed', 'accer', 'filter_cumu_dis',
# 'filter_speed', 'filter_accer', 'speed_based_cumu_dis', 'speed_based_speed', 'speed_based_accer', 'speed_based_filter_cumu_dis',
# 'speed_based_filter_speed', 'speed_based_accer']
coord_series_in.reset_index(drop=True, inplace=True)
coord_series_in.loc[:, 'cumu_dis'] = float(0)
coord_series_in.loc[:, 'speed'] = float(0)
coord_series_in.loc[:, 'accer'] = float(0)
coord_series_in.loc[:, 'speed_based_cumu_dis'] = float(0)
coord_series_in.loc[:, 'speed_based_speed'] = float(0)
coord_series_in.loc[:, 'speed_based_accer'] = float(0)
coord_series_in.loc[:, 'speed_based_jerk'] = float(0)
# calculate distance for position based method, and speed for speed based method
for i in range(1, len(coord_series_in['global_center_x'])):
pre_x = coord_series_in['global_center_x'].iloc[i - 1]
pre_y = coord_series_in['global_center_y'].iloc[i - 1]
post_x = coord_series_in['global_center_x'].iloc[i]
post_y = coord_series_in['global_center_y'].iloc[i]
single_dis = math.sqrt((post_x - pre_x) ** 2 + (post_y - pre_y) ** 2)
coord_series_in.loc[i, 'cumu_dis'] = coord_series_in.loc[i - 1, 'cumu_dis'] + single_dis
for i in range(len(coord_series_in['global_center_x'])):
speed_based_speed = math.sqrt((coord_series_in.at[i, 'speed_x']) ** 2 + (coord_series_in.at[i, 'speed_y']) ** 2)
coord_series_in.loc[i, 'speed_based_speed'] = speed_based_speed
# calculate speed and acceleration position based method, distance and aceleration for speed based method
coord_series_in = update_speed_and_accer(coord_series_in, 0)
coord_series_in = speed_based_update_distance_and_accer(coord_series_in)
# trajectory correctness
# initialize filter_value
coord_series_in.loc[:, 'filter_cumu_dis'] = coord_series_in.loc[:, 'cumu_dis'].to_numpy()
coord_series_in.loc[:, 'filter_speed'] = coord_series_in.loc[:, 'speed'].to_numpy()
coord_series_in.loc[:, 'filter_accer'] = coord_series_in.loc[:, 'accer'].to_numpy()
coord_series_in.loc[:, 'filter_jerk'] = 0
coord_series_in = trajectory_correctness(coord_series_in, segment_id_in, veh_id_in, start_time_in)
return coord_series_in
def speed_based_update_distance_and_accer(series_in):
# this function calculate the distance, acceleration and jerk based on speed (for speed-based data)
# series_in is the same format as coord_series_in
# output is series_in with updated speed and accer
current_cumu_dis = 'speed_based_cumu_dis'
current_speed = 'speed_based_speed'
current_accer = 'speed_based_accer'
for i in range(1, len(series_in['global_center_x'])):
if i == 1:
series_in.loc[0, current_cumu_dis] = 0
series_in.loc[i, current_cumu_dis] = series_in.loc[i - 1, current_cumu_dis] + (
series_in.loc[i, current_speed] + series_in.loc[i - 1, current_speed]) * 0.5 * 0.1
else:
series_in.loc[i, current_cumu_dis] = series_in.loc[i - 1, current_cumu_dis] + (
series_in.loc[i, current_speed] + series_in.loc[i - 1, current_speed]) * 0.5 * 0.1
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_accer] = float(
series_in.at[i + 2, current_speed] - series_in.at[i, current_speed]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_accer] = float(
series_in.at[i, current_speed] - series_in.at[i - 2, current_speed]) / (float(0.2))
else:
series_in.at[i, current_accer] = float(
series_in.at[i + 1, current_speed] - series_in.at[i - 1, current_speed]) / (float(0.2))
current_jerk = 'speed_based_jerk'
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_jerk] = float(
series_in.at[i + 2, current_accer] - series_in.at[i, current_accer]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_jerk] = float(
series_in.at[i, current_accer] - series_in.at[i - 2, current_accer]) / (float(0.2))
else:
series_in.at[i, current_jerk] = float(
series_in.at[i + 1, current_accer] - series_in.at[i - 1, current_accer]) / (float(0.2))
return series_in
def update_speed_and_accer(series_in, filter_label):
# this function calculate the speed, accelearation, jerk based on position
# series_in is the same format as coord_series_in
# output is series_in with updated speed and accer
if filter_label == 1:
current_cumu_dis = 'filter_cumu_dis'
current_speed = 'filter_speed'
current_accer = 'filter_accer'
elif filter_label == 0:
current_cumu_dis = 'cumu_dis'
current_speed = 'speed'
current_accer = 'accer'
current_jerk = 'jerk'
else:
# label should be 2
current_cumu_dis = 'remove_outlier_cumu_dis'
current_speed = 'remove_outlier_speed'
current_accer = 'remove_outlier_accer'
current_jerk = 'remove_outlier_jerk'
# calculate speed
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_speed] = float(
series_in.at[i + 2, current_cumu_dis] - series_in.at[i, current_cumu_dis]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_speed] = float(
series_in.at[i, current_cumu_dis] - series_in.at[i - 2, current_cumu_dis]) / (float(0.2))
else:
series_in.at[i, current_speed] = float(
series_in.at[i + 1, current_cumu_dis] - series_in.at[i - 1, current_cumu_dis]) / (float(0.2))
# calculate accerleration
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_accer] = float(
series_in.at[i + 2, current_speed] - series_in.at[i, current_speed]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_accer] = float(
series_in.at[i, current_speed] - series_in.at[i - 2, current_speed]) / (float(0.2))
else:
series_in.at[i, current_accer] = float(
series_in.at[i + 1, current_speed] - series_in.at[i - 1, current_speed]) / (float(0.2))
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_jerk] = float(
series_in.at[i + 2, current_accer] - series_in.at[i, current_accer]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_jerk] = float(
series_in.at[i, current_accer] - series_in.at[i - 2, current_accer]) / (float(0.2))
else:
series_in.at[i, current_jerk] = float(
series_in.at[i + 1, current_accer] - series_in.at[i - 1, current_accer]) / (float(0.2))
return series_in
def plot_outlier_adjacent_trj(series_in, outlier_pos_in, first_pos_in, last_pos_in, segment_id_in, veh_id_in, start_time_in, comparison_label):
# plot the adjacent trajectory of the outlier (20 points)
outlier_time = round(start_time_in + outlier_pos_in * 0.1, 1)
included_index = np.arange(first_pos_in, last_pos_in + 1, dtype=int)
outlier_trj = series_in.loc[included_index, :]
outlier_trj.loc[:, 'local_time'] = np.array(included_index) * 0.1 + start_time_in
plt.subplot(3, 1, 1)
plt.plot(outlier_trj['local_time'], outlier_trj['cumu_dis'], '-*k', linewidth=0.25, label='Original', markersize=1.5)
if comparison_label == 1:
plt.plot(outlier_trj['local_time'], outlier_trj['remove_outlier_cumu_dis'], '-m', linewidth=0.25, label='Outliers Removed')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(int(segment_id_in)) + ' Vehicle' + str(
int(veh_id_in)) + ' Outlier at Time ' + str(outlier_time) + ' Removing'
else:
trj_title = 'Segment ' + str(int(segment_id_in)) + ' Vehicle' + str(
int(veh_id_in)) + ' Outlier at Time ' + str(outlier_time) + ' Pattern'
plt.ylabel('Position (m)')
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(outlier_trj['local_time'], outlier_trj['speed'], '-*k', linewidth=0.5, label='Original', markersize=1.5)
if comparison_label == 1:
plt.plot(outlier_trj['local_time'], outlier_trj['remove_outlier_speed'], '-m', linewidth=0.5, label='Outliers Removed')
plt.legend(prop={'size': 6})
plt.ylabel('Speed (m/s)')
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(outlier_trj['local_time'], outlier_trj['accer'], '-*k', linewidth=0.5, label='Original', markersize=1.5)
if comparison_label == 1:
plt.plot(outlier_trj['local_time'], outlier_trj['remove_outlier_accer'], '-m', linewidth=0.5, label='Outliers Removed')
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-15, 15])
trj_save_title = 'figure_save/trajectory_process/outlier_pattern_and_removing/' + trj_title + '.png'
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def outlier_removing_optimization_model(initial_state_in, last_state_in, num_points_in):
# note that the num_points_in includes the first and last points
# if the total number of interpolated points is n, then num_points_in = n + 2
# time interval is 0.1 second
# total number of time steps
max_acc = 5
min_acc = -8
total_steps = num_points_in
first_pos_in = initial_state_in[0]
first_speed_in = initial_state_in[1]
first_acc_in = initial_state_in[2]
last_pos_in = last_state_in[0]
last_speed_in = last_state_in[1]
last_acc_in = last_state_in[2]
# time interval in each step
time_interval = 0.1
# model = GEKKO() # Initialize gekko
model = GEKKO(remote=False) # Initialize gekko
# Use IPOPT solver (default)
model.options.SOLVER = 3
model.options.SCALING = 2
# Initialize variables
acc = [None] * total_steps # simulated acceleration
velocity = [None] * total_steps # simulated velocity
pos = [None] * total_steps # simulated position
for i in range(total_steps):
pos[i] = model.Var()
velocity[i] = model.Var()
velocity[i].lower = 0
acc[i] = model.Var(lb=min_acc, ub=max_acc)
min_sim_acc = model.Var()
max_sim_acc = model.Var()
model.Equation(pos[0] == first_pos_in)
model.Equation(velocity[0] == first_speed_in)
model.Equation(acc[0] == first_acc_in)
model.Equation(pos[total_steps - 1] == last_pos_in)
model.Equation(velocity[total_steps - 1] == last_speed_in)
model.Equation(acc[total_steps - 1] == last_acc_in)
for i in range(total_steps):
if 1 <= i <= total_steps - 1:
model.Equation(velocity[i] == velocity[i - 1] + acc[i - 1] * time_interval)
model.Equation(pos[i] == pos[i - 1] + 0.5 * (velocity[i] + velocity[i - 1]) * time_interval)
for i in range(total_steps):
model.Equation(min_sim_acc <= acc[i])
model.Equation(max_sim_acc >= acc[i])
# objective function: minimize the difference between max_sim_acc and min_sim_acc
model.Obj(max_sim_acc - min_sim_acc)
# model.options.IMODE = 2 # Steady state optimization
model.options.MAX_MEMORY = 5
model.solve(disp=False)
# solve_time = model.options.SOLVETIME
# extract values from Gekko type variables
acc_value = np.zeros(total_steps)
velocity_value = np.zeros(total_steps)
pos_value = np.zeros(total_steps)
for i in range(total_steps):
acc_value[i] = acc[i].value[0]
velocity_value[i] = velocity[i].value[0]
pos_value[i] = pos[i].value[0]
return pos_value, velocity_value, acc_value
def optimization_based_outlier_removing(series_in, first_pos_in, last_pos_in, min_acc_in, max_acc_in):
# given the position of the outlier, optimize its vicinity's trajectory
first_point_pos = first_pos_in
last_point_pos = last_pos_in
first_point_cumu_dis = series_in.at[first_point_pos, 'remove_outlier_cumu_dis']
first_point_speed = series_in.at[first_point_pos, 'remove_outlier_speed']
if series_in.at[first_point_pos, 'remove_outlier_accer'] <= min_acc_in:
first_point_acc = min_acc_in
elif series_in.at[first_point_pos, 'remove_outlier_accer'] >= max_acc_in:
first_point_acc = max_acc_in
else:
first_point_acc = series_in.at[first_point_pos, 'remove_outlier_accer']
first_point_state = [first_point_cumu_dis, first_point_speed, first_point_acc]
last_point_cumu_dis = series_in.at[last_point_pos, 'remove_outlier_cumu_dis']
last_point_speed = series_in.at[last_point_pos, 'remove_outlier_speed']
if series_in.at[last_point_pos, 'remove_outlier_accer'] <= min_acc_in:
last_point_acc = min_acc_in
elif series_in.at[last_point_pos, 'remove_outlier_accer'] >= max_acc_in:
last_point_acc = max_acc_in
else:
last_point_acc = series_in.at[last_point_pos, 'remove_outlier_accer']
last_point_state = [last_point_cumu_dis, last_point_speed, last_point_acc]
actual_total_related_points = last_point_pos - first_point_pos + 1
pos_result, speed_result, acc_result = outlier_removing_optimization_model(first_point_state, last_point_state, actual_total_related_points)
series_in.loc[first_point_pos:last_point_pos, 'remove_outlier_cumu_dis'] = pos_result
series_in = update_speed_and_accer(series_in, 2)
return series_in
def wavefilter(data):
# We will use the Daubechies(6) wavelet
daubechies_num = 6
wname = "db" + str(daubechies_num)
datalength = data.shape[0]
max_level = pywt.dwt_max_level(datalength, wname)
print('maximun level is: %s' % max_level)
# Initialize the container for the filtered data
# Decompose the signal
# coeff[0] is approximation coeffs, coeffs[1] is nth level detail coeff, coeff[-1] is first level detail coeffs
coeffs = pywt.wavedec(data, wname, mode='smooth', level=max_level)
# thresholding
for j in range(max_level):
coeffs[-j - 1] = np.zeros_like(coeffs[-j - 1])
# Reconstruct the signal and save it
filter_data = pywt.waverec(coeffs, wname, mode='smooth')
fdata = filter_data[0:datalength]
return fdata
def wavelet_filter(series_in):
remove_outlier_speed_signal = series_in.loc[:, 'remove_outlier_speed'].to_numpy()
wavelet_filter_speed = wavefilter(remove_outlier_speed_signal)
series_in.loc[:, 'wavelet_filter_speed'] = wavelet_filter_speed
series_in.loc[:, 'wavelet_filter_cumu_dis'] = None
series_in.loc[:, 'wavelet_filter_accer'] = None
series_in.loc[:, 'wavelet_filter_jerk'] = None
# update cumulative distance
for i in range(len(series_in['global_center_x'])):
if i == 0:
# start from the filtered value
series_in.loc[i, 'wavelet_filter_cumu_dis'] = 0 # initial pos should be 0
else:
series_in.loc[i, 'wavelet_filter_cumu_dis'] = series_in.loc[i - 1, 'wavelet_filter_cumu_dis'] + (
series_in.loc[i - 1, 'wavelet_filter_speed'] + series_in.loc[i, 'wavelet_filter_speed']) * 0.5 * 0.1
# update acceleration
current_speed = 'wavelet_filter_speed'
current_accer = 'wavelet_filter_accer'
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_accer] = float(
series_in.at[i + 2, current_speed] - series_in.at[i, current_speed]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_accer] = float(
series_in.at[i, current_speed] - series_in.at[i - 2, current_speed]) / (float(0.2))
else:
series_in.at[i, current_accer] = float(
series_in.at[i + 1, current_speed] - series_in.at[i - 1, current_speed]) / (float(0.2))
current_jerk = 'wavelet_filter_jerk'
for i in range(0, len(series_in['global_center_x'])):
if i == 0:
series_in.at[i, current_jerk] = float(
series_in.at[i + 2, current_accer] - series_in.at[i, current_accer]) / (float(0.2))
elif i == len(series_in['global_center_x']) - 1:
series_in.at[i, current_jerk] = float(
series_in.at[i, current_accer] - series_in.at[i - 2, current_accer]) / (float(0.2))
else:
series_in.at[i, current_jerk] = float(
series_in.at[i + 1, current_accer] - series_in.at[i - 1, current_accer]) / (float(0.2))
return series_in
def trajectory_correctness(coord_series_in, segment_id_in, veh_id_in, start_time_in):
# this function remove outliers and filter the trajectory
# input coord_series_in: ['global_center_x', 'global_center_y', 'cumu_dis', 'speed', 'accer']
# output coord_series_in: ['global_center_x', 'global_center_y', 'cumu_dis', 'speed', 'accer', 'filter_cumu_dis', 'filter_speed', 'filter_accer']
minimum_accer = -8
maximum_accer = 5
reference_points_num = 20
coord_series_in.reset_index(inplace=True, drop=True)
global all_outlier_record
# remove outliers in acceleration, note that cubic spline interpolation is implemented on distance
# initialize remove outlier results
coord_series_in.loc[:, 'remove_outlier_cumu_dis'] = coord_series_in.loc[:, 'cumu_dis']
coord_series_in.loc[:, 'remove_outlier_speed'] = coord_series_in.loc[:, 'speed']
coord_series_in.loc[:, 'remove_outlier_accer'] = coord_series_in.loc[:, 'accer']
# removing outliers should be conducted multiple times until there is no outlier
outlier_label = 1
while outlier_label:
outlier_label = 0
for m in range(len(coord_series_in['global_center_x'])):
if coord_series_in.at[m, 'remove_outlier_accer'] >= maximum_accer or coord_series_in.at[m, 'remove_outlier_accer'] <= minimum_accer:
print('Outlier info: Current segment: %s, vehicle id: %s, time: %s, position: %s' % (
segment_id_in, veh_id_in, round(m * 0.1 + start_time_in, 1), m))
single_outlier_record = pd.DataFrame(np.zeros((1, 3)), columns=['segment_id', 'local_veh_id', 'outlier_time'])
single_outlier_record.loc[0, 'segment_id'] = segment_id_in
single_outlier_record.loc[0, 'local_veh_id'] = veh_id_in
single_outlier_record.loc[0, 'outlier_time'] = start_time_in + 0.1 * m
all_outlier_record = all_outlier_record.append(single_outlier_record)
total_related_points = 20
first_point_pos = int(max(0, m - total_related_points / 2))
last_point_pos = int(min(len(coord_series_in.loc[:, 'remove_outlier_accer']) - 1, m + total_related_points / 2))
if first_point_pos == 0:
last_point_pos = first_point_pos + total_related_points
if last_point_pos == len(coord_series_in.loc[:, 'remove_outlier_accer']) - 1:
first_point_pos = last_point_pos - total_related_points
plot_outlier_adjacent_trj(coord_series_in, m, first_point_pos, last_point_pos, segment_id_in, veh_id_in, start_time_in, 0)
# the following pairs may not have feasible solutions during outlier removal
if segment_id_in == 191 and veh_id_in == 6:
pass
elif segment_id_in == 270 and veh_id_in == 4:
pass
elif segment_id_in == 276 and veh_id_in == 2:
pass
elif segment_id_in == 320 and veh_id_in == 1:
pass
elif segment_id_in == 406 and veh_id_in == 25:
pass
elif segment_id_in == 449 and veh_id_in == 41:
pass
elif segment_id_in == 450 and veh_id_in == 15:
pass
elif segment_id_in == 676 and veh_id_in == 15:
pass
elif segment_id_in == 769 and veh_id_in == 50:
pass
elif segment_id_in == 916 and veh_id_in == 4:
pass
elif segment_id_in == 968 and veh_id_in == 18:
pass
else:
coord_series_in = optimization_based_outlier_removing(coord_series_in, first_point_pos, last_point_pos, minimum_accer,
maximum_accer)
plot_outlier_adjacent_trj(coord_series_in, m, first_point_pos, last_point_pos, segment_id_in, veh_id_in, start_time_in, 1)
outlier_label = 0 # outlier still exsit in this loop
# implement wavelet filter after removing outliers
coord_series_in = wavelet_filter(coord_series_in)
# set the final filter results to the wavelet filte results
coord_series_in.loc[:, 'filter_cumu_dis'] = coord_series_in.loc[:, 'wavelet_filter_cumu_dis'].to_numpy()
coord_series_in.loc[:, 'filter_speed'] = coord_series_in.loc[:, 'wavelet_filter_speed'].to_numpy()
coord_series_in.loc[:, 'filter_accer'] = coord_series_in.loc[:, 'wavelet_filter_accer'].to_numpy()
coord_series_in.loc[:, 'filter_jerk'] = coord_series_in.loc[:, 'wavelet_filter_jerk'].to_numpy()
return coord_series_in
def before_and_after_remove_outlier_plot(trj_in):
current_seg_id = trj_in['segment_id'].iloc[0]
follower_id_in = trj_in['local_veh_id'].iloc[0]
if len(all_outlier_record) > 0:
current_seg_outlier_record = all_outlier_record.loc[
all_outlier_record['segment_id'] == current_seg_id, :]
current_seg_outlier_record_local_veh_id = current_seg_outlier_record.loc[:, 'local_veh_id'].to_numpy().astype(np.int32)
else:
current_seg_outlier_record_local_veh_id = []
if int(follower_id_in) in current_seg_outlier_record_local_veh_id:
plt.subplot(3, 1, 1)
plt.plot(trj_in['local_time'], trj_in['position'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_pos'], '-m', linewidth=0.25, label='Outliers Removed')
plt.ylabel('Position (m)')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(int(current_seg_id)) + ' Vehicle' + str(
int(follower_id_in)) + ' Before and After Removing Outliers'
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(trj_in['local_time'], trj_in['speed'], '--k', linewidth=0.5, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_speed'], '-m', linewidth=0.5, label='Outliers Removed')
plt.ylabel('Speed (m/s)')
plt.legend(prop={'size': 6})
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(trj_in['local_time'], trj_in['accer'], '--k', linewidth=0.5, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_accer'], '-m', linewidth=0.5, label='Outliers Removed')
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-15, 15])
trj_save_title = 'figure_save/trajectory_process/before_and_after_remove_outlier_plot/' + trj_title + '.png'
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def before_and_after_filtering_plot(trj_in):
current_seg_id = trj_in['segment_id'].iloc[0]
follower_id_in = trj_in['local_veh_id'].iloc[0]
if len(all_outlier_record) > 0:
current_seg_outlier_record = all_outlier_record.loc[
all_outlier_record['segment_id'] == current_seg_id, :]
current_seg_outlier_record_local_veh_id = current_seg_outlier_record.loc[:, 'local_veh_id'].to_numpy().astype(np.int32)
else:
current_seg_outlier_record_local_veh_id = []
if int(follower_id_in) in current_seg_outlier_record_local_veh_id:
plt.subplot(3, 1, 1)
plt.plot(trj_in['local_time'], trj_in['position'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_pos'], '-m', linewidth=0.25, label='Outliers Removed')
plt.plot(trj_in['local_time'], trj_in['wavelet_filter_pos'], '-*g', linewidth=0.25, label='Outliers Removed + Filtering', markersize=0.5)
plt.ylabel('Position (m)')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(int(current_seg_id)) + ' Vehicle' + str(
int(follower_id_in)) + ' Before and After Filtering'
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(trj_in['local_time'], trj_in['speed'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_speed'], '-m', linewidth=0.25, label='Outliers Removed')
plt.plot(trj_in['local_time'], trj_in['wavelet_filter_speed'], '-*g', linewidth=0.25, label='Outliers Removed + Filtering', markersize=0.5)
plt.ylabel('Speed (m/s)')
plt.legend(prop={'size': 6})
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(trj_in['local_time'], trj_in['accer'], '--k', linewidth=0.25, label='Original')
plt.plot(trj_in['local_time'], trj_in['remove_outlier_accer'], '-m', linewidth=0.25, label='Outliers Removed')
plt.plot(trj_in['local_time'], trj_in['wavelet_filter_accer'], '-*g', linewidth=0.25, label='Outliers Removed + Filtering', markersize=0.5)
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-15, 15])
trj_save_title = 'figure_save/trajectory_process/before_and_after_filtering_plot/' + trj_title + '.png'
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def pair_cf_coord_cal(leader_id, leader_trj_in, follower_id, follower_trj_in, av_label):
# convert 2-d coordinates to 1-d longitudinal coordinates
# note that the leader and follower interacts with each other
# av_label is to determine whether av is leader or follower (0 for follower, 1 for leader, 2 for non-av pair)
global all_seg_paired_cf_trj_final
global all_seg_paired_cf_trj_with_comparison
# extract mutual cf trajectory
min_local_time = max(leader_trj_in['local_time_stamp'].min(), follower_trj_in['local_time_stamp'].min())
max_local_time = min(leader_trj_in['local_time_stamp'].max(), follower_trj_in['local_time_stamp'].max())
leader_trj_in = leader_trj_in.loc[leader_trj_in['local_time_stamp'] >= min_local_time, :]
leader_trj_in = leader_trj_in.loc[leader_trj_in['local_time_stamp'] <= max_local_time, :]
follower_trj_in = follower_trj_in.loc[follower_trj_in['local_time_stamp'] >= min_local_time, :]
follower_trj_in = follower_trj_in.loc[follower_trj_in['local_time_stamp'] <= max_local_time, :]
# sort the trj
leader_trj_in = leader_trj_in.sort_values(['local_time_stamp'])
follower_trj_in = follower_trj_in.sort_values(['local_time_stamp'])
# initialize output format
out_leader_trj = pd.DataFrame(leader_trj_in[['segment_id', 'veh_id', 'length', 'local_time_stamp']].to_numpy(),
columns=['segment_id', 'local_veh_id', 'length', 'local_time'])
out_leader_trj.loc[:, 'follower_id'] = follower_id
out_leader_trj.loc[:, 'leader_id'] = leader_id
out_follower_trj = pd.DataFrame(follower_trj_in[['segment_id', 'veh_id', 'length', 'local_time_stamp']].to_numpy(),
columns=['segment_id', 'local_veh_id', 'length', 'local_time'])
out_follower_trj.loc[:, 'follower_id'] = follower_id
out_follower_trj.loc[:, 'leader_id'] = leader_id
# calculate coordinates of leader and follower
temp_current_segment_id = out_follower_trj['segment_id'].iloc[0]
temp_start_time = out_follower_trj['local_time'].iloc[0]
leader_cumu_dis = cumulated_dis_cal(
leader_trj_in.loc[:, ['global_center_x', 'global_center_y', 'speed_x', 'speed_y']], temp_current_segment_id, leader_id, temp_start_time)
follower_cumu_dis = cumulated_dis_cal(
follower_trj_in.loc[:, ['global_center_x', 'global_center_y', 'speed_x', 'speed_y']], temp_current_segment_id, follower_id, temp_start_time)
# calculate initial distance
pre_x_1 = leader_trj_in['global_center_x'].iloc[0]
pre_y_1 = leader_trj_in['global_center_y'].iloc[0]
post_x_1 = follower_trj_in['global_center_x'].iloc[0]
post_y_1 = follower_trj_in['global_center_y'].iloc[0]
initial_dis = math.sqrt((post_x_1 - pre_x_1) ** 2 + (post_y_1 - pre_y_1) ** 2)
# create position, speed, and acceleration data
# follower's position always start from 0
# position based
out_follower_trj.loc[:, 'position'] = follower_cumu_dis['cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'remove_outlier_pos'] = follower_cumu_dis['remove_outlier_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'filter_pos'] = follower_cumu_dis['filter_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_pos'] = follower_cumu_dis['wavelet_filter_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'speed'] = follower_cumu_dis['speed'].to_numpy()
out_follower_trj.loc[:, 'remove_outlier_speed'] = follower_cumu_dis['remove_outlier_speed'].to_numpy()
out_follower_trj.loc[:, 'filter_speed'] = follower_cumu_dis['filter_speed'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_speed'] = follower_cumu_dis['wavelet_filter_speed'].to_numpy()
out_follower_trj.loc[:, 'accer'] = follower_cumu_dis['accer'].to_numpy()
out_follower_trj.loc[:, 'remove_outlier_accer'] = follower_cumu_dis['remove_outlier_accer'].to_numpy()
out_follower_trj.loc[:, 'filter_accer'] = follower_cumu_dis['filter_accer'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_accer'] = follower_cumu_dis['wavelet_filter_accer'].to_numpy()
out_follower_trj.loc[:, 'jerk'] = follower_cumu_dis['jerk'].to_numpy()
out_follower_trj.loc[:, 'filter_jerk'] = follower_cumu_dis['filter_jerk'].to_numpy()
out_follower_trj.loc[:, 'wavelet_filter_jerk'] = follower_cumu_dis['wavelet_filter_jerk'].to_numpy()
out_leader_trj.loc[:, 'position'] = leader_cumu_dis['cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'remove_outlier_pos'] = leader_cumu_dis['remove_outlier_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'filter_pos'] = leader_cumu_dis['filter_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'wavelet_filter_pos'] = leader_cumu_dis['wavelet_filter_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'speed'] = leader_cumu_dis['speed'].to_numpy()
out_leader_trj.loc[:, 'remove_outlier_speed'] = leader_cumu_dis['remove_outlier_speed'].to_numpy()
out_leader_trj.loc[:, 'filter_speed'] = leader_cumu_dis['filter_speed'].to_numpy()
out_leader_trj.loc[:, 'wavelet_filter_speed'] = leader_cumu_dis['wavelet_filter_speed'].to_numpy()
out_leader_trj.loc[:, 'accer'] = leader_cumu_dis['accer'].to_numpy()
out_leader_trj.loc[:, 'remove_outlier_accer'] = leader_cumu_dis['remove_outlier_accer'].to_numpy()
out_leader_trj.loc[:, 'filter_accer'] = leader_cumu_dis['filter_accer'].to_numpy()
out_leader_trj.loc[:, 'wavelet_filter_accer'] = leader_cumu_dis['wavelet_filter_accer'].to_numpy()
out_leader_trj.loc[:, 'jerk'] = leader_cumu_dis['jerk'].to_numpy()
out_leader_trj.loc[:, 'filter_jerk'] = leader_cumu_dis['filter_jerk'].to_numpy()
out_leader_trj.loc[:, 'wavelet_filter_jerk'] = leader_cumu_dis['wavelet_filter_jerk'].to_numpy()
# speed based
out_follower_trj.loc[:, 'speed_based_position'] = follower_cumu_dis['speed_based_cumu_dis'].to_numpy()
out_follower_trj.loc[:, 'speed_based_speed'] = follower_cumu_dis['speed_based_speed'].to_numpy()
out_follower_trj.loc[:, 'speed_based_accer'] = follower_cumu_dis['speed_based_accer'].to_numpy()
out_follower_trj.loc[:, 'speed_based_jerk'] = follower_cumu_dis['speed_based_jerk'].to_numpy()
out_leader_trj.loc[:, 'speed_based_position'] = leader_cumu_dis['speed_based_cumu_dis'].to_numpy() + initial_dis
out_leader_trj.loc[:, 'speed_based_speed'] = leader_cumu_dis['speed_based_speed'].to_numpy()
out_leader_trj.loc[:, 'speed_based_accer'] = leader_cumu_dis['speed_based_accer'].to_numpy()
out_leader_trj.loc[:, 'speed_based_jerk'] = leader_cumu_dis['speed_based_jerk'].to_numpy()
# plot speed and acc figure
before_and_after_remove_outlier_plot(out_follower_trj)
before_and_after_remove_outlier_plot(out_leader_trj)
before_and_after_filtering_plot(out_follower_trj)
before_and_after_filtering_plot(out_leader_trj)
# save cf paired trj
# all_seg_paired_cf_trj = pd.concat([all_seg_paired_cf_trj, pd.concat([out_leader_trj, out_follower_trj])])
all_seg_paired_cf_trj_with_comparison = all_seg_paired_cf_trj_with_comparison.append(
pd.concat([out_leader_trj, out_follower_trj]))
out_follower_trj_final = out_follower_trj.loc[:,
['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']]
out_follower_trj_final.columns = ['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']
out_leader_trj_final = out_leader_trj.loc[:,
['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']]
out_leader_trj_final.columns = ['segment_id', 'local_veh_id', 'length', 'local_time', 'follower_id', 'leader_id',
'filter_pos', 'filter_speed', 'filter_accer']
all_seg_paired_cf_trj_final = all_seg_paired_cf_trj_final.append(
pd.concat([out_leader_trj_final, out_follower_trj_final]))
# plot the car following trj of both follower and leader
cf_paired_trj_plot(out_leader_trj_final, out_follower_trj_final, av_label)
def cf_pair_coord_trans(seg_trj_in, follower_id_in, leader_id_in, av_related_label):
# extract all cf pairs in one segment
# the input seg_trj_in is already with local id
# av_related_label determines if there is av involed
# return the paired trj with transformed coordination with format of ['segment_id', 'local_veh_id', 'length','local_time','follower_id', 'leader_id', 'position', 'speed', 'accer']
follower_trj = seg_trj_in[seg_trj_in['veh_id'] == follower_id_in]
leader_trj = seg_trj_in[seg_trj_in['veh_id'] == leader_id_in]
ego_trj = seg_trj_in[seg_trj_in['veh_id'] == 0]
if av_related_label:
# process av related pair
if follower_id_in == 0 and leader_id_in == 0:
# this segment is not suitable for cf (av related)
pass
elif follower_id_in == 0 and leader_id_in != 0:
# AV-HV pair
pair_cf_coord_cal(leader_id_in, leader_trj, 0, ego_trj, 0)
elif follower_id_in != 0 and leader_id_in == 0:
# HV-AV pair
pair_cf_coord_cal(0, ego_trj, follower_id_in, follower_trj, 1)
else:
# both AV-HV pair and HV-AV pair
pair_cf_coord_cal(leader_id_in, leader_trj, 0, ego_trj, 0)
pair_cf_coord_cal(0, ego_trj, follower_id_in, follower_trj, 1)
else:
# process HV-HV pair
pair_cf_coord_cal(leader_id_in, leader_trj, follower_id_in, follower_trj, 2)
def cf_paired_trj_plot(leader_trj_in, follower_trj_in, av_label):
# av_label is to determine whether av is leader or follower (0 for follower, 1 for leader, 2 for non-av)
# the format of the trajectory is pandas dataframe
# for av_label: 0 means AV-HV, 1 means HV-AV, 2 means HV-HV
current_segment_id = int(leader_trj_in['segment_id'].iloc[0])
current_leader_id = int(leader_trj_in['local_veh_id'].iloc[0])
current_follower_id = int(follower_trj_in['local_veh_id'].iloc[0])
if av_label == 0:
follower_line = '-r'
leader_line = '--b'
follower_label = 'AV Follower'
leader_label = 'HV Leader'
trj_title = 'AV' + '-HV' + str(current_leader_id)
trj_save_title = 'figure_save/trajectory_process/position_time_plot/av_hv/' + 'Segment_' + str(
current_segment_id) + '_' + trj_title + '_position_time_plot.png'
elif av_label == 1:
follower_line = '-b'
leader_line = '--r'
follower_label = 'HV Follower'
leader_label = 'AV Leader'
trj_title = 'HV' + str(current_follower_id) + '-AV'
trj_save_title = 'figure_save/trajectory_process/position_time_plot/hv_av/' + 'Segment_' + str(
current_segment_id) + '_' + trj_title + '_position_time_plot.png'
else:
follower_line = '-b'
leader_line = '--b'
follower_label = 'HV Follower'
leader_label = 'HV Leader'
trj_title = 'HV' + str(current_follower_id) + '-HV' + str(current_leader_id)
trj_save_title = 'figure_save/trajectory_process/position_time_plot/hv_hv/' + 'Segment_' + str(
current_segment_id) + '_' + trj_title + '_position_time_plot.png'
plt.subplot(3, 1, 1)
plt.plot(follower_trj_in['local_time'], follower_trj_in['filter_pos'], follower_line, linewidth=0.5, label=follower_label)
plt.plot(leader_trj_in['local_time'], leader_trj_in['filter_pos'], leader_line, linewidth=0.5, label=leader_label)
plt.ylabel('Position (m)')
plt.legend(prop={'size': 6})
trj_title = 'Segment ' + str(current_segment_id) + ' ' + trj_title + ' Trajectory'
plt.title(trj_title)
plt.subplot(3, 1, 2)
plt.plot(follower_trj_in['local_time'], follower_trj_in['filter_speed'], follower_line, linewidth=0.5, label=follower_label)
plt.plot(leader_trj_in['local_time'], leader_trj_in['filter_speed'], leader_line, linewidth=0.5, label=leader_label)
plt.ylabel('Speed (m/s)')
plt.legend(prop={'size': 6})
plt.ylim([0, 35])
plt.subplot(3, 1, 3)
plt.plot(follower_trj_in['local_time'], follower_trj_in['filter_accer'], follower_line, linewidth=0.5, label=follower_label)
plt.plot(leader_trj_in['local_time'], leader_trj_in['filter_accer'], leader_line, linewidth=0.5, label=leader_label)
plt.legend(prop={'size': 6})
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim([-5, 5])
plt.savefig(trj_save_title, dpi=600)
plt.close('all')
def cf_trj_info_cal(all_seg_paired_cf_trj_in):
# calculate car following measurements
# output format
# ['segment_id', 'local_veh_id', 'length','local_time','follower_id', 'leader_id', 'position', 'speed', 'accer',
# 'cf_label', 'space_hwy', 'net_distance', 'time_hwy', 'speed_diff', 'TTC', 'DRAC']
# cf_label: 0 for AV-HV, 1 for HV-AV
# net_distance = space_hwy - 0.5*follower_length - 0.5*leader_length
# time_hwy = space_hwy/follower_speed
# speed_diff = follower_speed - leader_speed
# Time To Collision: TTC = speed_diff/net_distance
# Deceleration Required to Avoid Crash: DRAC = (speed_diff ** 2) / net_distance
def single_cf_pair_info_cal(follower_trj_in, leader_trj_in, cf_label_in):
global all_seg_cf_info
# input format ['segment_id', 'local_veh_id', 'length','local_time','follower_id', 'leader_id', 'position', 'speed', 'accer']
out_cf_info = follower_trj_in.copy(deep=True)
out_cf_info['cf_label'] = cf_label_in
out_cf_info['space_hwy'] = 0
out_cf_info['net_distance'] = 0
out_cf_info['time_hwy'] = 0
out_cf_info['speed_diff'] = 0
out_cf_info['TTC'] = 0
out_cf_info['DRAC'] = 0
for i in range(len(out_cf_info['segment_id'])):
current_time = out_cf_info['local_time'].iloc[i]
l_time_match = abs(leader_trj_in['local_time'] - current_time) <= 0.001
matched_leader_trj = leader_trj_in.loc[l_time_match, :]
if len(matched_leader_trj) > 0:
space_hwy = matched_leader_trj['filter_pos'].iloc[0] - out_cf_info['filter_pos'].iloc[i]
out_cf_info['space_hwy'].iloc[i] = space_hwy
net_distance = space_hwy - 0.5 * matched_leader_trj['length'].iloc[0] - 0.5 * \
out_cf_info['length'].iloc[i]
out_cf_info['net_distance'].iloc[i] = net_distance
if out_cf_info['filter_speed'].iloc[i] <= 0.1:
out_cf_info['time_hwy'].iloc[i] = 1000000
else:
out_cf_info['time_hwy'].iloc[i] = space_hwy / out_cf_info['filter_speed'].iloc[i]
speed_diff = out_cf_info['filter_speed'].iloc[i] - matched_leader_trj['filter_speed'].iloc[0]
out_cf_info['speed_diff'].iloc[i] = speed_diff
if speed_diff < 0:
out_cf_info['TTC'].iloc[i] = 0
out_cf_info['DRAC'].iloc[i] = 0
else:
out_cf_info['TTC'].iloc[i] = net_distance / speed_diff
out_cf_info['DRAC'].iloc[i] = (speed_diff ** 2) / net_distance
all_seg_cf_info = all_seg_cf_info.append(out_cf_info)
# ----- av-related cf info -----
all_seg_av_hv_trj = all_seg_paired_cf_trj_in.loc[all_seg_paired_cf_trj_in['follower_id'] == 0, :]
all_seg_hv_av_trj = all_seg_paired_cf_trj_in.loc[all_seg_paired_cf_trj_in['leader_id'] == 0, :]
av_hv_seg_id = pd.unique(all_seg_av_hv_trj['segment_id'])
for id1 in av_hv_seg_id:
segment_print = 'Now in AV-HV segment: ' + str(id1)
print(segment_print)
current_seg_trj = all_seg_av_hv_trj.loc[all_seg_av_hv_trj['segment_id'] == id1, :]
follower_id = current_seg_trj['follower_id'].iloc[0]
leader_id = current_seg_trj['leader_id'].iloc[0]
flollower_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == follower_id, :]
leader_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == leader_id, :]
single_cf_pair_info_cal(flollower_trj, leader_trj, 0)
follower_av_seg_id = pd.unique(all_seg_hv_av_trj['segment_id'])
for id1 in follower_av_seg_id:
segment_print = 'Now in HV-AV segment: ' + str(id1)
print(segment_print)
current_seg_trj = all_seg_hv_av_trj.loc[all_seg_hv_av_trj['segment_id'] == id1, :]
follower_id = current_seg_trj['follower_id'].iloc[0]
leader_id = current_seg_trj['leader_id'].iloc[0]
flollower_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == follower_id, :]
leader_trj = current_seg_trj.loc[current_seg_trj['local_veh_id'] == leader_id, :]
single_cf_pair_info_cal(flollower_trj, leader_trj, 1)
# ----- hv-hv cf info -----
l1 = all_seg_paired_cf_trj_in['follower_id'] != 0
l2 = all_seg_paired_cf_trj_in['leader_id'] != 0
all_seg_hv_hv_leader_trj = all_seg_paired_cf_trj_in.loc[l1 & l2, :]
hv_hv_seg_id = pd.unique(all_seg_hv_hv_leader_trj['segment_id'])
for id1 in hv_hv_seg_id:
segment_print = 'Now in HV-HV segment: ' + str(id1)
print(segment_print)
current_seg_trj = all_seg_hv_hv_leader_trj.loc[all_seg_hv_hv_leader_trj['segment_id'] == id1, :]
all_follower_id = pd.unique(current_seg_trj['follower_id'])
for id2 in all_follower_id:
# note that one segment may have multiple hv-hv pairs
current_pair_trj = current_seg_trj.loc[current_seg_trj['follower_id'] == id2, :]
follower_id = current_pair_trj['follower_id'].iloc[0]
leader_id = current_pair_trj['leader_id'].iloc[0]
flollower_trj = current_pair_trj.loc[current_pair_trj['local_veh_id'] == follower_id, :]
leader_trj = current_pair_trj.loc[current_pair_trj['local_veh_id'] == leader_id, :]
single_cf_pair_info_cal(flollower_trj, leader_trj, 2)
def cf_pair_exclude_rules_implementation(all_seg_paired_cf_trj_in, all_seg_cf_info_in):
# this function verify if a selected CF pair is suitable for car following research
# this verification is necessary because currently the CF pairs are extracted manually by watching the top view videos and might be error-prone
# 6 rules are defined in the paper:
# rule 1: Exclude if there is no leader or follower
# rule 2: Exclude if the follower or leader is off the Lidar detection range (disappear from the video) for some time
# rule 3: Exclude if the leader or follower is a bus or heavy truck
# rule 4: Exclude if the follower changes its leader (either the follower or the leader changes its lane)
# rule 5: Exclude if follower remains standstill during the entire segment
# rule 6: Exclude if the car following state is interrupted by turning, parking, stop signs, traffic signals, pedestrians, or other obstacles
# note that: for rule 1 there is no need to verify because all selected pairs have a follower and a leader
# note that: for rule 4, since there is no lane mark in the provided dataset, so we are not able to analysis lane changing pairs
# therefore, only rules 2, 3, 5, 6 are implemented here
all_seg_paired_cf_trj_verified = all_seg_paired_cf_trj_in.copy(deep=True)
def single_cf_pair_verification(flollower_trj_in, leader_trj_in, follower_cf_info_in):
# this function implement rules 2, 3, 5, 6
# output is 0 or 1: 0 denotes this pair is valid, 1 denotes this pair will be removed
output_label = 0 # default value is 0
flollower_trj_in.reset_index(inplace=True)
leader_trj_in.reset_index(inplace=True)
follower_cf_info_in.reset_index(inplace=True)
# rule 2
for i in range(1, len(flollower_trj_in.loc[:, 'segment_id'])):
# if the time difference between two consecutive points is larger than 0.2s, then this pair is excluded
if flollower_trj_in.loc[i, 'local_time'] - flollower_trj_in.loc[i - 1, 'local_time'] >= 0.2:
output_label = 1
print('Rule 2 excluded')
return output_label
for j in range(1, len(leader_trj_in.loc[:, 'segment_id'])):
# if the time difference between two consecutive points is larger than 0.2s, then this pair is excluded
if leader_trj_in.loc[j, 'local_time'] - leader_trj_in.loc[j - 1, 'local_time'] >= 0.2:
output_label = 1
print('Rule 2 excluded')
return output_label
# rule 3
large_vehicle_length_threshold = 8
if flollower_trj_in.loc[0, 'length'] >= large_vehicle_length_threshold:
output_label = 1
print('Rule 3 excluded')
return output_label
if leader_trj_in.loc[0, 'length'] >= large_vehicle_length_threshold:
output_label = 1
print('Rule 3 excluded')
return output_label
# rule 5
if flollower_trj_in.loc[:, 'filter_speed'].max() <= 0.1:
# the case where the follower is always standstill
output_label = 1
print('Rule 5 excluded')
return output_label
# rule 6
# based on the slope of v-s curve, if the slope is obviously negative, then this pair is excluded
detection_length = 50 # calculate the slope every 50 points
slope_threhold = -0.5 # if the slope of v-s curve is smaller than this threshold, then this pair is excluded
for i in range(len(follower_cf_info_in.loc[:, 'segment_id']) - detection_length):
# monotonic test, only in the case of monotonic v-s curve will be slope be calculated
l_speed = follower_cf_info_in.loc[i:i+detection_length - 1, 'filter_speed'].is_monotonic_increasing or \
follower_cf_info_in.loc[i:i+detection_length - 1, 'filter_speed'].is_monotonic_decreasing
l_spacing = follower_cf_info_in.loc[i:i+detection_length - 1, 'space_hwy'].is_monotonic_increasing or \
follower_cf_info_in.loc[i:i+detection_length - 1, 'space_hwy'].is_monotonic_decreasing
if l_speed and l_spacing:
v_data = follower_cf_info_in.loc[i:i+detection_length - 1, 'filter_speed'].values.reshape(-1, 1)
s_data = follower_cf_info_in.loc[i:i+detection_length - 1, 'space_hwy'].values.reshape(-1, 1)
current_regression = LinearRegression()
current_regression.fit(s_data, v_data)
current_slope = current_regression.coef_[0]
if current_slope <= slope_threhold:
output_label = 1
print('Rule 6 excluded')
return output_label
return output_label
all_seg_id = pd.unique(all_seg_paired_cf_trj_in['segment_id'])
for id1 in all_seg_id:
current_seg_trj = all_seg_paired_cf_trj_in.loc[all_seg_paired_cf_trj_in['segment_id'] == id1, :]
current_seg_cf_info = all_seg_cf_info_in.loc[all_seg_cf_info_in['segment_id'] == id1, :]
all_follower_id = pd.unique(current_seg_trj['follower_id'])
for id2 in all_follower_id:
current_pair_trj = current_seg_trj.loc[current_seg_trj['follower_id'] == id2, :]
current_follower_cf_info = current_seg_cf_info.loc[current_seg_cf_info['follower_id'] == id2, :]
follower_id = current_pair_trj['follower_id'].iloc[0]
leader_id = current_pair_trj['leader_id'].iloc[0]
segment_print = 'Now in segment: ' + str(id1) + ' Follower:' + str(follower_id) + ' Leader:' + str(leader_id)
print(segment_print)
flollower_trj = current_pair_trj.loc[current_pair_trj['local_veh_id'] == follower_id, :]
leader_trj = current_pair_trj.loc[current_pair_trj['local_veh_id'] == leader_id, :]
verification_result = single_cf_pair_verification(flollower_trj, leader_trj, current_follower_cf_info)
if verification_result:
# remove this pair
l_segment_id = all_seg_paired_cf_trj_verified['segment_id'] == id1
l_follower_id = all_seg_paired_cf_trj_verified['follower_id'] == follower_id
l_leader_id = all_seg_paired_cf_trj_verified['leader_id'] == leader_id
l_overall = (l_segment_id & l_follower_id) & l_leader_id
all_seg_paired_cf_trj_verified.drop(all_seg_paired_cf_trj_verified[l_overall].index, inplace=True)
all_seg_paired_cf_trj_verified.to_csv('data_save/all_seg_paired_cf_trj_verified.csv')
def lidar_detection_range():
# calculate the maximum detection range in each segment for each type of objects
all_segment_lidar_original_lidar_distance = | pd.DataFrame() | pandas.DataFrame |
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
def get_html_text(html):
try:
return html.text
except AttributeError:
return np.nan
def parse_title(html_title):
title_text = html_title.text
try:
pub_end_index = title_text.index("'")
pub = title_text[:pub_end_index]
except ValueError:
pub = np.nan
year = title_text[-4:]
return pub, year
def scrape_page(soup):
"""scrape the html from a page for album of the year and convert to a data frame"""
rows = soup.findAll(class_="albumListRow")
list_length = len(rows)
title = soup.find("title")
# Exclude non year-end lists
if "so far" in title.text.lower():
return None
else:
pub, year = parse_title(title)
#Get the rank, title, and artist as a list
page_data = dict()
collect_terms = ["albumListTitle", "albumListDate", "albumListGenre", "scoreText", "scoreValue"]
for term in collect_terms:
page_data[term] = [get_html_text(row.find(class_=term)) for row in rows]
page_df = | pd.DataFrame.from_dict(page_data) | pandas.DataFrame.from_dict |
"""This script is designed to perform statistics of demographic information
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr,spearmanr,kendalltau
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from eslearn.utils.lc_read_write_mat import read_mat, write_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\10-24ๅคง่กจ.xlsx'
headmotion_file = r'D:\WorkStation_2018\SZ_classification\Scale\ๅคดๅจๅๆฐ_1322.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.groupby('่ฏๆญ')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.groupby('่ฏๆญ')['ๅนด้พ'].describe()
describe_duration_550 = scale_selected_550.groupby('่ฏๆญ')['็
็จๆ'].describe()
describe_durgnaive_550 = scale_selected_550.groupby('่ฏๆญ')['็จ่ฏ'].value_counts()
describe_sex_550 = scale_selected_550.groupby('่ฏๆญ')['ๆงๅซ'].value_counts()
# Demographic
demographic_info_dataset1 = scale_selected_550[['folder', '่ฏๆญ', 'ๅนด้พ', 'ๆงๅซ', '็
็จๆ']]
headmotion = pd.read_excel(headmotion_file)
headmotion = headmotion[['Subject ID','mean FD_Power']]
demographic_info_dataset1 = pd.merge(demographic_info_dataset1, headmotion, left_on='folder', right_on='Subject ID', how='inner')
demographic_info_dataset1 = demographic_info_dataset1.drop(columns=['Subject ID'])
site_dataset1 = pd.DataFrame(np.zeros([len(demographic_info_dataset1),1]))
site_dataset1.columns = ['site']
demographic_dataset1_all = pd.concat([demographic_info_dataset1 , site_dataset1], axis=1)
demographic_dataset1_all.columns = ['ID','Diagnosis', 'Age', 'Sex', 'Duration', 'MeanFD', 'Site']
demographic_dataset1 = demographic_dataset1_all[['ID','Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']]
demographic_dataset1['Diagnosis'] = np.int32(demographic_dataset1['Diagnosis'] == 3)
# Duration and age
demographic_duration_dataset1 = demographic_dataset1_all[['Duration', 'Age']].dropna()
np.corrcoef(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
pearsonr(demographic_duration_dataset1['Duraton'], demographic_duration_dataset1['Age'])
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\ๅๅคง็ฒพๅไบบๅฃๅญฆๅๅ
ถๅฎ่ตๆ\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\ๅๅคง็ฒพๅไบบๅฃๅญฆๅๅ
ถๅฎ่ตๆ\SZ_NC_108_100-WF.csv'
headmotion_file_206 = r'D:\WorkStation_2018\SZ_classification\Scale\ๅๅคง็ฒพๅไบบๅฃๅญฆๅๅ
ถๅฎ่ตๆ\parameters\FD_power'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = pd.read_csv(scale_path_206)
scale_data_206 = scale_data_206.drop(np.array(scale_data_206.index)[scale_data_206['ID'].isin(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
Pscore = pd.DataFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype = np.float64)
Pscore = np.sum(Pscore, axis=1).describe()
Nscore = | pd.DataFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64) | pandas.DataFrame |
import quandl
mydata = quandl.get("YAHOO/INDEX_DJI", start_date="2005-12-01", end_date="2005-12-05")
import pandas as pd
authtoken = '<PASSWORD>'
def get_data_quandl(symbol, start_date, end_date):
data = quandl.get(symbol, start_date=start_date, end_date=end_date, authtoken=authtoken)
return data
def generate_features(df):
""" Generate features for a stock/index based on historical price and performance
Args:
df (dataframe with columns "Open", "Close", "High", "Low", "Volume", "Adjusted Close")
Returns:
dataframe, data set with new features
"""
df_new = pd.DataFrame()
# 6 original features
df_new['open'] = df['Open']
df_new['open_1'] = df['Open'].shift(1)
df_new['close_1'] = df['Close'].shift(1)
df_new['high_1'] = df['High'].shift(1)
df_new['low_1'] = df['Low'].shift(1)
df_new['volume_1'] = df['Volume'].shift(1)
# 31 original features
# average price
df_new['avg_price_5'] = pd.rolling_mean(df['Close'], window=5).shift(1)
df_new['avg_price_30'] = pd.rolling_mean(df['Close'], window=21).shift(1)
df_new['avg_price_365'] = pd.rolling_mean(df['Close'], window=252).shift(1)
df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']
df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']
df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']
# average volume
df_new['avg_volume_5'] = pd.rolling_mean(df['Volume'], window=5).shift(1)
df_new['avg_volume_30'] = | pd.rolling_mean(df['Volume'], window=21) | pandas.rolling_mean |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import numpy as np
from pandas.core.api import (Index, Series, TimeSeries, DataFrame, isnull)
import pandas.core.datetools as datetools
from pandas.util.testing import assert_series_equal
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# Series test cases
class TestSeries(unittest.TestCase):
def setUp(self):
self.ts = common.makeTimeSeries()
self.series = common.makeStringSeries()
self.objSeries = common.makeObjectSeries()
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(common.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assertRaises(Exception, Series, [0, 1, 2], index=None)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_corner(self):
df = common.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(common.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, series._set_index, None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, series._set_index,
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series.fromValue(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
strings = Series.fromValue('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
d = datetime.now()
dates = Series.fromValue(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
def test_contains(self):
common.assert_contains_all(self.ts.index, self.ts)
def test_save_load(self):
self.series.save('tmp1')
self.ts.save('tmp3')
unp_series = Series.load('tmp1')
unp_ts = Series.load('tmp3')
os.remove('tmp1')
os.remove('tmp3')
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(Exception, self.ts.__getitem__, d),
def test_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(common.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(common.randn(1000), index=np.arange(1000)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
def test_toString(self):
from cStringIO import StringIO
self.ts.toString(buffer=StringIO())
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
self.assert_(self.ts.keys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_stats(self):
self.series[5:15] = np.NaN
s1 = np.array(self.series)
s1 = s1[-np.isnan(s1)]
self.assertEquals(np.min(s1), self.series.min())
self.assertEquals(np.max(s1), self.series.max())
self.assertEquals(np.sum(s1), self.series.sum())
self.assertEquals(np.mean(s1), self.series.mean())
self.assertEquals(np.std(s1, ddof=1), self.series.std())
self.assertEquals(np.var(s1, ddof=1), self.series.var())
try:
from scipy.stats import skew
common.assert_almost_equal(skew(s1, bias=False),
self.series.skew())
except ImportError:
pass
self.assert_(not np.isnan(np.sum(self.series)))
self.assert_(not np.isnan(np.mean(self.series)))
self.assert_(not np.isnan(np.std(self.series)))
self.assert_(not np.isnan(np.var(self.series)))
self.assert_(not np.isnan(np.min(self.series)))
self.assert_(not np.isnan(np.max(self.series)))
self.assert_(np.isnan(Series([1.], index=[1]).std()))
self.assert_(np.isnan(Series([1.], index=[1]).var()))
self.assert_(np.isnan(Series([1.], index=[1]).skew()))
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op):
cython_or_numpy = op(series, other)
python = series.combineFunc(other, op)
common.assert_almost_equal(cython_or_numpy, python)
def check(other):
_check_op(other, operator.add)
_check_op(other, operator.sub)
_check_op(other, operator.div)
_check_op(other, operator.mul)
_check_op(other, operator.pow)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.div(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x))
check(self.ts * 2)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
deltas = Series([timedelta(1)] * 5, index=np.arange(5))
sub_deltas = deltas[::2]
deltas5 = deltas * 5
deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
common.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
self.assertRaises(Exception, self.ts.__pow__, df)
def test_combineFirst(self):
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combineFirst(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combineFirst(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = common.makeStringIndex(20)
floats = Series(common.randn(20), index=index)
strings = Series(common.makeStringIndex(10), index=index[::2])
combined = strings.combineFirst(floats)
common.assert_dict_equal(strings, combined, compare_keys=False)
common.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combineFirst(Series([], index=[]))
assert_series_equal(s, result)
def test_overloads(self):
methods = ['argsort', 'cumsum', 'cumprod']
for method in methods:
func = getattr(np, method)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
argsorted = self.ts.argsort()
self.assert_(argsorted.dtype == np.int_)
def test_median(self):
self.assertAlmostEqual(np.median(self.ts), self.ts.median())
ts = self.ts.copy()
ts[::2] = np.NaN
self.assertAlmostEqual(np.median(ts.valid()), ts.median())
def test_corr(self):
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# additional checks?
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assert_(np.isnan(result[-5:]).all())
self.assert_(np.array_equal(result[:-5], np.sort(vals[5:])))
result = ts.order(missingAtEnd=False)
self.assert_(np.isnan(result[:5]).all())
self.assert_(np.array_equal(result[5:], np.sort(vals[5:])))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
def test_map(self):
result = self.ts.map(lambda x: x * 2)
self.assert_(np.array_equal(result, self.ts * 2))
def test_toCSV(self):
self.ts.toCSV('_foo')
os.remove('_foo')
def test_toDict(self):
self.assert_(np.array_equal(Series(self.ts.toDict()), self.ts))
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
common.assert_dict_equal(result, ts, compare_keys=False)
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
common.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)
offset = datetools.bday
shifted = self.ts.shift(1, offset=offset)
unshifted = shifted.shift(-1, offset=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, offset=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, timeRule='WEEKDAY')
unshifted = shifted.shift(-1, timeRule='WEEKDAY')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
def test_truncate(self):
offset = datetools.bday
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
| assert_series_equal(truncated, expected) | pandas.util.testing.assert_series_equal |
""" Indexer for raman data files """
import hashlib
from typing import List
# get_directory_paths_for_run_mode
# from .index_selection import index_selection
import logging
import sys
from pathlib import Path
import pandas as pd
from raman_fitting.config.filepath_helper import get_directory_paths_for_run_mode
# parse_filepath_to_sid_and_pos
from raman_fitting.indexing.filename_parser import index_dtypes_collection
from raman_fitting.indexing.filename_parser_collector import make_collection
# from raman_fitting.utils._dev_sqlite_db import df_to_db_sqlalchemy
# from .. import __package_name__
logger = logging.getLogger(__name__)
logger.propagate = False
__all__ = ["MakeRamanFilesIndex"]
#%%
class MakeRamanFilesIndex:
"""
Finds the RAMAN files in the data folder from config and creates an overview, on the attribute .index
finds a list of files,
"""
# index_file_sample_cols = {'FileStem': 'string',
# 'SampleID': 'string',
# 'SamplePos': 'int64',
# 'SampleGroup': 'string',
# 'FilePath': 'string')
# index_file_stat_cols = ('FileCreationDate' , 'FileCreation','FileModDate', 'FileMod', 'FileHash')
# INDEX_FILE_NAME = 'index.csv'
debug = False
table_name = "ramanfiles"
# RESULTS_DIR = config.RESULTS_DIR,
# DATASET_DIR = config.DATASET_DIR,
# INDEX_FILE = config.INDEX_FILE,
def __init__(
self, force_reload=True, run_mode="normal", dataset_dirs=None, **kwargs
):
self._cqnm = self.__class__.__qualname__
self._kwargs = kwargs
self.force_reload = force_reload
self.run_mode = run_mode
if not dataset_dirs:
dataset_dirs = get_directory_paths_for_run_mode(run_mode=self.run_mode)
self.dataset_dirs = dataset_dirs
for k, val in self.dataset_dirs.items():
if isinstance(val, Path):
setattr(self, k, val)
# if val.is_dir() or val.is_file():
self.raman_files = self.find_files(data_dir=self.DATASET_DIR)
self.index = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from json import JSONDecoder
import random
import pygal
from pygal.style import Style
import pandas
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
# ############### Errors ################
class DateError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# ############### Tools ################
def buildDoubleIndex(index1, index2, datatype):
it = -1
newindex1 = []
for index in index2:
if index == 0:
it += 1
newindex1.append(index1[it])
arrays = [newindex1, index2]
tuples = list(zip(*arrays))
return pandas.MultiIndex.from_tuples(tuples, names=['event', datatype])
def buildNewColumn(index2, column):
it = -1
newcolumn = []
for index in index2:
if index == 0:
it += 1
newcolumn.append(column[it])
return newcolumn
def dateInRange(datetimeTested, begin=None, end=None):
if begin is None:
begin = datetime(1970, 1, 1)
if end is None:
end = datetime.now()
return begin <= datetimeTested <= end
def addColumn(dataframe, columnList, columnName):
dataframe.loc[:, columnName] = pandas.Series(columnList, index=dataframe.index)
def toDatetime(date):
return parse(date)
def checkDateConsistancy(begindate, enddate, lastdate):
if begindate is not None and enddate is not None:
if begindate > enddate:
raise DateError('begindate ({}) cannot be after enddate ({})'.format(begindate, enddate))
if enddate is not None:
if toDatetime(enddate) < lastdate:
raise DateError('enddate ({}) cannot be before lastdate ({})'.format(enddate, lastdate))
if begindate is not None:
if toDatetime(begindate) > datetime.now():
raise DateError('begindate ({}) cannot be after today ({})'.format(begindate, datetime.now().date()))
def setBegindate(begindate, lastdate):
return max(begindate, lastdate)
def setEnddate(enddate):
return min(enddate, datetime.now())
def getLastdate(last):
return (datetime.now() - timedelta(days=int(last))).replace(hour=0, minute=0, second=0, microsecond=0)
# ############### Formatting ################
def eventsListBuildFromList(filename):
with open(filename, 'r') as myfile:
s = myfile.read().replace('\n', '')
decoder = JSONDecoder()
s_len = len(s)
Events = []
end = 0
while end != s_len:
Event, end = decoder.raw_decode(s, idx=end)
Events.append(Event)
data = []
for e in Events:
data.append(pandas.DataFrame.from_dict(e, orient='index'))
Events = pandas.concat(data)
for it in range(Events['attribute_count'].size):
if Events['attribute_count'][it] is None:
Events['attribute_count'][it] = '0'
else:
Events['attribute_count'][it] = int(Events['attribute_count'][it])
Events = Events.set_index('id')
return Events
def eventsListBuildFromArray(jdata):
'''
returns a structure listing all primary events in the sample
'''
data = [pandas.DataFrame.from_dict(e, orient='index') for e in jdata['response']]
events = | pandas.concat(data) | pandas.concat |
import numpy as np
import pandas as pd
import xgboost as xgb
import scipy.stats as ss
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def to_categorical(df, cols):
for col in cols:
df[col] = pd.Categorical(df[col])
df[col] = df[col].cat.codes
return df
def to_numeric(df, cols):
for col in cols:
df[col] = pd.to_numeric(df[col])
return df
def split_data(df):
# x = df.drop(columns=['ma_binding_score'])
# y = df['ma_binding_score']
train = df['days_since_g0'] < 20200401
train_data = df[train]
test_data = df[~train]
x_train = train_data.drop(columns=['ma_binding_score'])
y_train = train_data['ma_binding_score']
x_test = test_data.drop(columns=['ma_binding_score'])
y_test = test_data['ma_binding_score']
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=101)
return x_train, x_test, y_train, y_test
def load_data(filename='dataframe.pickle'):
df = | pd.read_pickle(filename) | pandas.read_pickle |
import pandas as pd
from voucher_opt.features.feature_definitions import Feature
from voucher_opt.logger import log
from voucher_opt.transformers.transformer import Transformer
class CategoricalTransformer(Transformer):
def __init__(self, categorical_features: [Feature], categorical_value_ratio_thresh):
self._categorical_features: [Feature] = categorical_features
self._categorical_value_ratio_thresh = categorical_value_ratio_thresh
self._feature_columns = None
def fit(self, df: pd.DataFrame):
input_feature_columns = [feat.short_name for feat in self._categorical_features]
feature_df = df[input_feature_columns].copy()
for feature in self._categorical_features:
log.debug(f'Transforming feature: {feature.original_name}')
feature_df.loc[:, feature.short_name] = self._filter_categories(feature_df[feature.short_name])
feature_df = | pd.get_dummies(feature_df, columns=input_feature_columns) | pandas.get_dummies |
import pandas as pd
import numpy as np
import sys
import tools.constants as c
from functools import reduce
pd.set_option('max_columns', 1000)
pd.set_option('max_info_columns', 1000)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 30000)
pd.set_option('max_colwidth', 4000)
pd.set_option('display.float_format', lambda x: '%.8f' % x)
pd.set_option('chained_assignment',None)
def rne_data_create():
# pull state and national
state = | pd.read_excel(f's3://emkf.data.research/indicators/kese/data_outputs/2021_kese_website/2021_rob_kese_files/Kauffman_Indicators_Data_State_1996_2021.xlsx', sheet_name='Rate of New Entrepreneurs') | pandas.read_excel |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction to for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical that overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(pd.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(pd.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
pd.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
"array, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
(IntervalArray.from_breaks([0, 1]), "_left"),
(SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
(
DatetimeArray(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
"_data",
),
],
)
def test_array(array, attr, index_or_series):
box = index_or_series
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip(f"No index type for {array.dtype}")
result = box(array, copy=False).array
if attr:
array = getattr(array, attr)
result = getattr(result, attr)
assert result is array
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
with pytest.raises(ValueError, match="MultiIndex"):
idx.array
@pytest.mark.parametrize(
"array, expected",
[
(np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
(pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
(
pd.core.arrays.period_array(["2000", "2001"], freq="D"),
np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
),
(
pd.core.arrays.integer_array([0, np.nan]),
np.array([0, pd.NA], dtype=object),
),
(
IntervalArray.from_breaks([0, 1, 2]),
np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
),
(SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
# tz-naive datetime
(
DatetimeArray(np.array(["2000", "2001"], dtype="M8[ns]")),
np.array(["2000", "2001"], dtype="M8[ns]"),
),
# tz-aware stays tz`-aware
(
DatetimeArray(
np.array(
["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
np.array(
[
pd.Timestamp("2000-01-01", tz="US/Central"),
pd.Timestamp("2000-01-02", tz="US/Central"),
]
),
),
# Timedelta
(
TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
np.array([0, 3600000000000], dtype="m8[ns]"),
),
],
)
def test_to_numpy(array, expected, index_or_series):
box = index_or_series
thing = box(array)
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip(f"No index type for {array.dtype}")
result = thing.to_numpy()
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
"arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)]
)
def test_to_numpy_copy(arr, as_series):
obj = pd.Index(arr, copy=False)
if as_series:
obj = pd.Series(obj.values, copy=False)
# no copy by default
result = obj.to_numpy()
assert np.shares_memory(arr, result) is True
result = obj.to_numpy(copy=False)
assert np.shares_memory(arr, result) is True
# copy=True
result = obj.to_numpy(copy=True)
assert np.shares_memory(arr, result) is False
@pytest.mark.parametrize("as_series", [True, False])
def test_to_numpy_dtype(as_series):
tz = "US/Eastern"
obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
if as_series:
obj = pd.Series(obj)
# preserve tz by default
result = obj.to_numpy()
expected = np.array(
[pd.Timestamp("2000", tz=tz), pd.Timestamp("2001", tz=tz)], dtype=object
)
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="object")
| tm.assert_numpy_array_equal(result, expected) | pandas._testing.assert_numpy_array_equal |
import os
from pathlib import Path
import click
import zarr
import torch
from tqdm.auto import tqdm
from pathlib import Path
import numpy as np
import pandas as pd
import scipy.ndimage
import matplotlib.pyplot as plt
from dotenv import load_dotenv
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from brainage.model.model3d import AgeModel3DVolume
from brainage.dataset.dataset3d import BrainDataset
from brainage.dataset.grid3d import GridPatchSampler, DataReaderHDF5
load_dotenv()
DATA = Path(os.getenv('DATA'))
CONFIG = Path(os.getenv('CONFIG'))
# regional age
@click.command()
@click.argument('checkpoint', type=click.Path(exists=True))
@click.option('--overlap', default=20)
@click.option('--batch_size', default=32)
@click.option('--chunk_size', default=50)
def regional_age_map(checkpoint, overlap=25, batch_size=32, chunk_size=50):
patch_overlap = [overlap, overlap, overlap]
out_dir = Path(DATA/'nako/processed/patchwise')
out_dir.mkdir(exist_ok=True)
print('loading model')
print(f'checkpoint: {checkpoint}')
model = AgeModel3DVolume.load_from_checkpoint(checkpoint, train_ds=None, val_ds=None)
model.eval()
device = torch.device('cuda')
model = model.to(device)
# setup
data_path = model.hparams['dataset']['data']
data_group = model.hparams['dataset']['group']
info = model.hparams['dataset']['info']
infocolumn = model.hparams['dataset']['column']
val_set = model.hparams['dataset']['val']
patch_size = np.array(model.hparams['dataset']['patch_size'])
val_keys = [l.strip() for l in Path(val_set).open().readlines()]
print(f'total number of keys {len(val_keys)}')
chunk_num = len(val_keys)//chunk_size
chunks = np.array_split(np.array(val_keys), chunk_num)
for c, chunk in enumerate(chunks):
print(f'chunk {c}/{chunk_num}')
chunk_keys = list(chunk)
print('loading data')
print(f'ds: {data_path} - {data_group}')
print(f'set: {val_set}')
info_df = pd.read_csv(info, index_col=0, dtype={'key': 'string', infocolumn: np.float32})
ds = GridPatchSampler(data_path,
chunk_keys[:],
patch_size, patch_overlap,
out_channels=2,
out_dtype=np.float32,
image_group=data_group,
ReaderClass=DataReaderHDF5,
pad_args={'mode': 'constant'})
loader = DataLoader(ds, batch_size=batch_size, num_workers=1, drop_last=False, shuffle=False)
print('processing subjects')
pred = {'key': [], 'pos0': [], 'pos1': [], 'pos2': [], 'y': [], 'yhat0': [], 'yhat1': []}
for sample in loader:
print('.', end='')
# predict
x = sample['data'].float()
x = x.to(device)
position = sample['position'].float()
y_hat = model(x, pos=None)
# store map with predicted values
shape = np.array(x.size())
sample['data'] = np.einsum('ij,klm->ijklm',
y_hat.detach().cpu().numpy(),
np.ones(shape[2:]))
ds.add_processed_batch(sample)
# store results
for b in range(len(sample['key'])):
key = sample['key'][b]
y = info_df.loc[key][infocolumn]
pred['key'].append(key)
pred['pos0'].append(position[b,0].cpu().item())
pred['pos1'].append(position[b,1].cpu().item())
pred['pos2'].append(position[b,2].cpu().item())
pred['y'].append(y)
pred['yhat0'].append(y_hat[b,0].cpu().item())
pred['yhat1'].append(y_hat[b,1].cpu().item())
print('storing results')
df = pd.DataFrame.from_dict(pred)
if (out_dir/f'predictions_regional.feather').is_file():
df_0 = pd.read_feather(out_dir/f'predictions_regional.feather').set_index('key')
df = df_0.combine_first(df.set_index('key'))
df = df.reset_index()
df.to_feather(out_dir/f'predictions_regional.feather')
results = ds.get_assembled_data()
with zarr.open(str(out_dir/'maps.zarr')) as root:
maps = root.require_group('agemaps')
zarr.copy_all(results, maps, if_exists='replace')
# grad cam grid
@click.command()
@click.argument('checkpoint', type=click.Path(exists=True))
@click.option('--overlap', default=5)
@click.option('--patch_size', default=64)
def gradcam_grid(checkpoint, overlap=25, patch_size=64):
patch_overlap = [overlap, overlap, overlap]
out_dir = Path(DATA/'nako/processed/patchwise')
out_dir.mkdir(exist_ok=True)
batch_size = 1
print('loading model')
print(f'checkpoint: {checkpoint}')
model = AgeModel3DVolume.load_from_checkpoint(checkpoint, train_ds=None, val_ds=None)
model.eval()
device = torch.device('cuda')
model = model.to(device)
# setup
data_path = model.hparams['dataset']['data']
data_group = model.hparams['dataset']['group']
info = model.hparams['dataset']['info']
infocolumn = model.hparams['dataset']['column']
val_set = model.hparams['dataset']['val']
patch_size = [patch_size, patch_size, patch_size]
val_keys = [l.strip() for l in Path(val_set).open().readlines()]
print('loading data')
print(f'ds: {data_path} - {data_group}')
print(f'set: {val_set}')
info_df = | pd.read_csv(info, index_col=0, dtype={'key': 'string', infocolumn: np.float32}) | pandas.read_csv |
import unittest
import os
import pandas as pd
import logging
from sklearn.exceptions import NotFittedError
from automatminer.utils.package_tools import compare_columns, check_fitted, set_fitted
from automatminer.utils.ml_tools import is_greater_better, regression_or_classification
from automatminer.utils.log_tools import initialize_logger, initialize_null_logger
from automatminer.base import DataframeTransformer, logger_base_name
run_dir = os.getcwd()
class MyTransformer(DataframeTransformer):
def __init__(self):
self.is_fit = False
@set_fitted
def fit(self, df, target):
return df
@check_fitted
def transform(self, df, target):
return df
class TestUtils(unittest.TestCase):
def test_logger_initialization(self):
log = initialize_logger(logger_base_name, level=logging.DEBUG)
log.info("Test logging.")
log.debug("Test debug.")
log.warning("Test warning.")
# test the log is written to run dir (e.g. where the script was called
# from and not the location of this test file
log_file = os.path.join(run_dir, logger_base_name + ".log")
self.assertTrue(os.path.isfile(log_file))
with open(log_file, 'r') as f:
lines = f.readlines()
self.assertTrue("logging" in lines[0])
self.assertTrue("debug" in lines[1])
self.assertTrue("warning" in lines[2])
null = initialize_null_logger("matbench_null")
null.info("Test null log 1.")
null.debug("Test null log 2.")
null.warning("Test null log 3.")
null_log_file = os.path.join(run_dir, logger_base_name + "_null.log")
self.assertFalse(os.path.isfile(null_log_file))
def test_is_greater_better(self):
self.assertTrue(is_greater_better('accuracy'))
self.assertTrue(is_greater_better('r2_score'))
self.assertTrue(is_greater_better('neg_mean_squared_error'))
self.assertFalse(is_greater_better('mean_squared_error'))
def test_compare_columns(self):
df1 = | pd.DataFrame({"a": [1, 2], "b": [2, 3]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pathway_file = '../../drp-data/pathways/9606.enrichr_pathway.edge'
pathway = pd.read_csv(pathway_file, sep='\t', header=None)
print("pathways:", pathway[0].nunique())
print("pathway genes:", pathway[1].nunique())
gsc_filtered = '../../KnowEng_GSC/GSC_10mod/drawr_filtered/DraWR_GSC_Enrichr_STRINGExp.xlsx'
ppi_file = '../../drp-data/pathways/9606.STRING_experimental.edge'
ppi = pd.read_csv(ppi_file, sep='\t', header=None)
print("PPI original edges:", len(ppi))
ppi['norm_score'] = ppi[2]/ppi[2].max()
ppi = ppi.loc[ppi['norm_score'] > 0.5]
print("PPI filtered edges:", len(ppi))
nodes = list(set(ppi[0]).union(set(ppi[1])))
print("PPI nodes:", len(nodes) )
folder = 'CX_ens10'
mean_attribution_file = 'results/CX_ens10/all_attributions.csv'
feature_attr = pd.read_csv(mean_attribution_file, index_col=0)
top_genes_file = 'results/CX_ens10/top_genes_mean_aggregation_info.xlsx'
writer_a = pd.ExcelWriter('results/%s/one_hop.xlsx'%folder, engine='xlsxwriter')
drugs = [
'bleomycin',
'cisplatin',
'cyclophosphamide',
'docetaxel',
'doxorubicin',
'etoposide',
'gemcitabine',
'irinotecan',
'oxaliplatin',
'paclitaxel',
'pemetrexed',
'tamoxifen',
'temozolomide',
'vinorelbine']
# use dictionary coz it's faster
conv_file = '../../drp-data/lists/hgnc2ensembl.txt'
f = open(conv_file, 'r')
conv_table = {}
for line in f:
line = line.strip().split(',')
if line[1] != "":
conv_table[line[0]] = line[1]
# print(conv_table)
for drug in drugs:
gsc_pathways = | pd.read_excel(gsc_filtered, sheet_name=drug, index_col='property_gene_set_id') | pandas.read_excel |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#task1
#Write a Python program to draw a line with suitable label in the x axis, y axis and a title.
data=np.arange(12)
plt.plot(data)
plt.title("X and Y graph")
plt.xlabel("X values")
plt.ylabel("Y values")
plt.show()
#task2
#Write a Python program to draw line charts of the financial data of Alphabet Inc. between October 3, 2016 to October 7, 2016
date=["10-03-16","10-04-16","10-05-16","10-6-16","10-07-16"]
fin_data=[[774.25,776.065002,769.5,772.559998],
[776.030029,778.710022,772.890015,776.429993],
[779.309998,782.070007,775.650024,776.469971],
[779,780.47998,775.539978,776.859985],
[779.659973,779.659973,770.75,775.080017]
]
df=pd.DataFrame(fin_data,index=date)
df.plot.bar()
plt.show()
#task3
#Write a Python program to display the grid and draw line charts of the closing value of Alphabet Inc. between October 3, 2016 to October 7, 2016. Customized the grid lines with linestyle -, width .5. and color blue.
date = ["03-10-16", "04-10-16", "05-10-16", "06-10-16", "07-10-16"]
closing_value = [772.559998, 776.429993, 776.469971, 776.859985, 775.080017]
df = pd.Series(closing_value, index=date)
df.plot.bar(color="yellow", alpha=0.7)
plt.grid(color='blue', linestyle='-', linewidth=.5)
plt.title("Closing value of Alphabet Inc. between October 3, 2016 to October 7, 2016")
plt.xlabel("Date")
plt.ylabel("Closing balance")
plt.show()
#task4
#Write a Python programming to display a bar chart of the popularity of programming Languages
lang = ["Java", "Python", "PHP", "JavaScript", "C#", "C++"]
pop = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
df = pd.Series(pop, index=lang)
df.plot.bar()
plt.title("The popularity of programming Languages")
plt.xlabel("Languages")
plt.ylabel("Popularity")
plt.show()
#Task5
#Write a Python programming to create a pie chart of the popularity of programming Languages.
lang = ["Java", "Python", "PHP", "JavaScript", "C#", "C++"]
pop = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
df = | pd.Series(pop, index=lang) | pandas.Series |
import numpy as np
import pandas as pd
import hydrostats.data as hd
import hydrostats.visual as hv
import HydroErr as he
import matplotlib.pyplot as plt
import os
from netCDF4 import Dataset
# Put all the directories (different states and resolutions) and corresponding NetCDF files into lists.
list_of_files = []
list_of_dir = []
streamflow_dict = {}
list_streams = []
for i in os.listdir('/home/chrisedwards/Documents/rapid_output/mult_res_output'):
for j in os.listdir(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i)):
list_of_files.append(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i, j,
'Qout_erai_t511_24hr_19800101to20141231.nc'))
list_of_dir.append(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i, j))
list_of_dir.sort()
list_of_files.sort()
list_of_states=['az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col']
list_of_states.sort()
# Loop through the lists to create the csv for each stream, in each resolution.
for file, direc, state in zip(list_of_files, list_of_dir, list_of_states):
# Call the NetCDF file.
nc = Dataset(file)
nc.variables.keys()
nc.dimensions.keys()
# Define variables from the NetCDF file.
riv = nc.variables['rivid'][:].tolist()
lat = nc.variables['lat'][:]
lon = nc.variables['lon'][:]
Q = nc.variables['Qout'][:]
sQ = nc.variables['sQout'][:]
time = nc.variables['time'][:].tolist()
# Convert time from 'seconds since 1970' to the actual date.
dates = | pd.to_datetime(time, unit='s', origin='unix') | pandas.to_datetime |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regresssion test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = | timedelta_range('4 days', periods=5, freq='2D', name='x') | pandas.timedelta_range |
#!/usr/bin/env python
# coding=utf-8
"""Data manipulation and cost allocation helpers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import re
import math
import platform
import locale
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import texttable
import six
from IPython.core.display import HTML
import functools
__author__ = '<NAME>'
__copyright__ = ' Copyright 2014-2019, Tartan Solutions, Inc'
__credits__ = ['<NAME>', '<NAME>']
__license__ = 'Apache 2.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
# default formatting for report_to_xl
report_formatting = {
'report_title': {
'size': 16,
'color': 2,
'background': (0, 32, 96),
'bold': True
},
'report_subtitle': {
'size': 12,
'color': 56,
'background': (149, 179, 215),
'bold': True,
'italic': False
},
'table_title': {
'size': 11,
'background': (199, 199, 199),
'bold': True
},
'table_subtitle': {
'size': 11,
'background': (242, 242, 242),
'bold': True
},
'table_header': {
'size': 8,
'bold': True
},
'table_data':{
'size': 8,
'word_wrap': False
},
'table_summary': {
'bold': True
}
}
if platform.system() == "Windows":
locale.setlocale(locale.LC_ALL, 'english_us') # <--this setting will be different in linux
else:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
def suppress_non_unicode(dirty_string):
"""Get rid of non-unicode characters.
WARNING: This loops through each character in strings.
Use sparingly.
http://stackoverflow.com/questions/20078816/replace-non-ascii-characters-with-a-single-space
"""
return '' .join([i if ord(i) < 128 else ' ' for i in dirty_string])
def clean_frame(df):
"""
for each column in the frame with dtype==object,
return the list of decoded cell in the Series instead
This presumes any type of object is intended to be a string. Be careful.
This feels inefficient as hell, but it freaking works.
"""
def clean_ascii(cell):
try:
return ''.join([c.decode('unicode_escape').encode('ascii', 'ignore') for c in cell]).replace('\n"', '"')
except Exception:
return ' '
for col in df:
if str(df[col].dtype) == 'object':
df[col] = list(map(clean_ascii, df[col]))
return df
def jupyter_table(input):
"""
Produce a pretty text table that will show up as such in Jupyter.
Use this by first importing importing into your notebook
from IPython.core.display import display, HTML
Then display table in jupyter like this
my_table = dh.get_text_table(df_nsv) #where dh = data_helpers
display(jupyter_table(my_table))
This is way better for readability than the way jupyter renders pandas tables.
Also, it will work on non-pandas stuff where browser-based
line wrapping is not desired.
"""
# TODO: Just make this an option of inspect
jupyter_wrapper = ''.join(['<div style="font-weight: normal;font-family: monospace;white-space:pre;">', input, '</div>'])
jt = HTML(jupyter_wrapper)
return jt
def cast_as_str(input_val):
"""Force a value to be of type 'str'.
Args:
input_val (mixed): Input of mixed type
Returns:
str: String of the value, defaults to ' ' if the value cannot be cast as an string
Examples:
>>> cast_as_str('3')
'3'
>>> cast_as_str(3.55)
'3.55'
>>> cast_as_str('Three')
'Three'
>>> cast_as_str(None)
' '
>>> cast_as_str(np.nan)
' '
>>> cast_as_str({})
'{}'
>>> cast_as_str(pd.NA)
' '
"""
if input_val is None:
return ' '
try:
if pd.isna(input_val) or np.isnan(input_val):
return ' '
else:
try:
return str(input_val)
except:
return ''.join([i if ord(i) < 128 else ' ' for i in input_val])
except:
try:
return str(input_val)
except:
return ''.join([i if ord(i) < 128 else ' ' for i in input_val])
def coalesce(*options):
"""
Args:
*options: A list of objects some of which might be None, ending in a default value.
Returns:
The leftmost object in the list that is not None, '', or NaN. If no
such objects exist, returns the rightmost object in the list, or None
if called with no arguments.
Examples:
>>> coalesce()
>>> coalesce(None)
>>> coalesce(None, 'a')
'a'
>>> coalesce(None, None, 'a')
'a'
>>> coalesce(None, 'a', 'b')
'a'
>>> coalesce('', 'a')
'a'
>>> coalesce(float('nan'), 'a')
'a'
>>> coalesce(None, None, 'c')
'c'
"""
# pseudocode for list comp version:
# [o for o in options if o is not None and o != '' and o is not NaN][0]
def coalesce2(option_a, option_b):
try:
if math.isnan(option_a):
return option_b
except:
pass
if option_a is None:
return option_b
elif option_a == '':
return option_b
else:
return option_a
return functools.reduce(coalesce2, options, None)
def cast_as_int(input_val):
"""
Args:
input_val: A value of unknown type
Returns:
Int of the value, defaults to 0 if the value cannot be cast as an int.
Examples:
>>> cast_as_int('3')
3
>>> cast_as_int(3.55)
3
>>> cast_as_int('Three')
0
>>> cast_as_int(None)
0
>>> cast_as_int(np.nan)
0
>>> cast_as_int(pd.NA)
0
"""
if input_val is None:
return 0
try:
if pd.isna(input_val) or np.isnan(input_val):
return 0
else:
try:
return int(input_val)
except ValueError:
return 0
except:
try:
return int(input_val)
except ValueError:
return 0
def cast_as_float(input_val):
"""Force a value to be of type 'float'.
Args:
input_val (mixed): Mixed input type
Examples:
>>> cast_as_float('3')
3.0
>>> cast_as_float(3.55)
3.55
>>> cast_as_float('Three')
0.0
>>> cast_as_float(None)
0.0
>>> cast_as_float(np.nan)
0.0
>>> cast_as_float(pd.NA)
0.0
"""
if input_val is None:
return 0.0
try:
if pd.isna(input_val) or np.isnan(input_val):
return 0.0
else:
try:
return float(input_val)
except ValueError:
return 0.0
except:
try:
return float(input_val)
except ValueError:
return 0.0
def num(num):
"""Make numbers pretty with comma separators."""
if math.isnan(num):
num = 0
return locale.format_string("%d", num, grouping=True)
def cols(input_frame, title='table', print_out=True, show_dtype=True, double_quotes=False):
"""
This is just a quick wrapper of get_columns with flipped defaults.
"""
return get_columns(input_frame, title, print_out)
def get_columns(input_frame, title='table', print_out=False):
"""
Accepts a pandas data frame and returns a reader-friendly listing of columns and datatype.
Very useful in debug mode for quick copy/paste when writing UDFs
Args:
input_frame (Pandas.Dataframe): Dataframe
title: (str): Title of table.
print_out: (boolean): print directly, so the user doesn't need to wrap the call in a print(). Useful for debugging
Returns:
string: print of a table
"""
col_info = ''
for column in input_frame.columns:
#col_info += (" '" + i + "', #" + str(input_frame[i].dtype)+"\n")
col_info += " '{}', #{}\n".format(column, input_frame[column].dtype)
if print_out:
print(col_info)
else:
return col_info
def list_columns(input_frame, title='table', print_out=True):
"""
Accepts a pandas data frame and returns a reader-friendly listing of columns and datatype.
Very useful in debug mode for quick copy/paste when writing UDFs
Args:
input_frame (Pandas.Dataframe): Dataframe
title: (str): Title of table.
print_out: (boolean): print directly, so the user doesn't need to wrap the call in a print(). Useful for debugging
Returns:
string: print of table columns for easy copy-paste into superset json config.
Was going to do this as a call to get_columns with a bunch of optional params set a certain way, but was faster to just
make a seprate method.
"""
col_info = ''
dq = '"'
for column in input_frame.columns:
#col_info += '"{}", '.format(column)
col_info += "{dq}{column}{dq}, \n".format(column=column, dq=dq)
if print_out:
print(col_info)
else:
return col_info
def inspect(input_frame, nrows=10, title='table', types=True, print_out=True):
"""This is just a quick wrapper of get_text_table with flipped defaults."""
return get_text_table(input_frame, nrows, title, types, print_out)
def get_text_table(df, nrows=10, title='table', types=True, print_out=False):
"""Creates a printed text table of a dataframe (or eventually other table-structured input).
Accepts a pandas data frame and returns a print-friendly table.
Args:
df (Pandas.Dataframe): Dataframe
nrows: (int): Number of rows printed
title: (str): Title of table.
types: (boolean): Indicate data type in column headers
print_out: (boolean): print directly, so the user doesn't need to wrap the call in a print(). Useful for debugging
Returns:
string: print of a table, or None, if print_out=True. In the later case, it just prints directly.
Todo: * Allow optional arguments to change table cosmetics such
as column width and borders.
* Could be expanded to check type of inbound recordset and
handle many different types of recordsets including:
pandas data frame
NumPy recarray
hdf5
list of lists / list of tuples
.. http://foutaise.org/code/texttable/
"""
frame_length = len(df)
frame_width = len(df.columns)
input_frame = df.copy(deep=True)
if nrows is not None:
input_frame = input_frame[:nrows] # do this early on, so we don't cast_as_foo() on stuff that we don't need.
col_types = ['i'] # inbound tables has an index of type int.
for i in input_frame.columns:
cur_type = input_frame[i].dtype
if cur_type == object:
col_types.append('t')
elif 'int' in str(cur_type):
# facepalm. Hey, it works, and with all flavors of int.
col_types.append('i')
elif cur_type == float:
col_types.append('f')
else:
col_types.append('a')
if types is True:
for i in input_frame.columns:
cur_type = input_frame[i].dtype
if cur_type == object:
input_frame.rename(columns={input_frame[i].name : input_frame[i].name + '::str'}, inplace=True)
elif 'int' in str(cur_type):
input_frame.rename(columns={input_frame[i].name : input_frame[i].name + '::int'}, inplace=True)
elif cur_type == float:
input_frame.rename(columns={input_frame[i].name : input_frame[i].name + '::float'}, inplace=True)
else:
input_frame.rename(columns={input_frame[i].name : input_frame[i].name + '::{}'.format(cur_type)}, inplace=True)
# TODO from Pat: Maybe this loop and the one above could be combined into a method - def gather_column_types(rename)
input_recarray = input_frame.iloc[0:nrows].to_records()
headers = input_recarray.dtype.names
records = input_recarray.tolist()
input_list = []
input_list.append(headers)
input_list.extend(records)
tt = texttable.Texttable(max_width=0)
tt.set_deco(texttable.Texttable.HEADER)
tt.set_cols_dtype(col_types)
tt.add_rows(input_list)
output = '------------------\n' +\
title + ': ' + num(frame_length) + ' records, ' + num(frame_width) +\
' columns \n------------------\n' + tt.draw() + '\n'
if print_out is True:
print(output)
else:
return output
def mask(df, key, value):
"""
Useful for daisy-chaining dataframe filters
"""
return df[df[key] == value]
def not_equal(df, key, value):
"""
Useful for daisy-chaining dataframe filters
"""
return df[df[key] != value]
def in_list(df, key, value):
"""
Useful for daisy-chaining dataframe filters
"""
return df[df[key] in value]
def not_in_list(df, key, value):
"""
Useful for daisy-chaining dataframe filters
"""
return df[df[key] not in value] # maybe this should be df[~df[key] in value]
def is_equal(df, key, value):
"""
Useful for daisy-chaining dataframe filters
"""
return df[df[key] == value]
def clean_names(frame):
"""
Delete input columns "foo__in" from frame.
Clean output columns "foo__out" -> "foo"
#TODO: Delete or clean WIP columns...not sure what to do yet ("foo__bar")
"""
col_list = list(frame.columns)
for col in col_list:
if col.endswith('__in'):
del frame[col]
elif col.endswith('__out'):
frame.rename(columns={col: col[:-5]}, inplace=True)
elif col.endswith('__value'):
del frame[col]
elif col.endswith('__split'):
frame.rename(columns={col: col[:-7]}, inplace=True)
return frame
def remove_nan_values_from_dict(d):
"""
removes any nan values from a dict
input: d (type:dict)
return: no_nan_dict (type:dict)
"""
no_nan_dict = {}
for key, val in d.items():
if | pd.notnull(val) | pandas.notnull |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script for plotting the recorded C-V measurement curves,
including derived depletion depth and effective charge carrier concentration
(Neff) plots.
@author: <NAME>
@date: March 2019
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from os import listdir
from matplotlib.collections import PathCollection
from matplotlib.legend_handler import HandlerPathCollection
import decimal as D
mpl.rcParams['font.size']=13 #default font size
# enable for cleaning the regular mico pattern in the Neff plots, suggests data
# is affeted by rounding errors introduced by the instrument
FILTER_ROUNDING_ARTEFACTS = False
## constants
e0 = 8.854e-14 #F/cm
eSi = 11.9 #for Si
q = 1.602e-19 # charge in Coulomb = F/V
A = 0.0702 # diode area in cm^2
### helper functions
def oneOverX(x, a, b,c,d,e):
#return a * np.exp(-b * x) + c
return (a/(b * (x**c + d))) + e
def depth(C):
return eSi*e0*A/C
def Neff(oneOverCsq,dV):
oneOverCsq=oneOverCsq #*1e15
# following https://cds.cern.ch/record/1169276/files/04636908.pdf
# unit analysis suggests Neff output is in [1/cm]
dCdV = np.gradient(oneOverCsq,dV) #/1e15
return 2/(q * eSi * e0 * (A**2) * dCdV)
### read measurement series and prepare data
df = pd.DataFrame()
folder = "../data/high_resolution_CV/"
filepaths = [f for f in listdir(folder) if f.endswith('.csv')]
filepaths.sort()
count = 0
for file in filepaths:
with open(folder+file) as f:
head = [next(f) for x in range(15)]
deviceName = head[9].split()[2]
#print(deviceName)
anode = 'Vanode'
# using Decimal here to preserve original precision of numbers (reaveals rounding errors in data)
df_new = | pd.read_csv(folder+file, skiprows=1407, sep=',' , converters={anode: D.Decimal, 'Cp': D.Decimal}, engine='c', skipinitialspace= True, float_precision='round_trip') | pandas.read_csv |
import os
from typing import List, Union
import requests
import pandas as pd
API_URL = "https://love-the-air.herokuapp.com/api"
def get_states() -> List[str]:
"""
Returns a List of Available States
Examples
--------
>>> states = client.get_states()
>>> states
['Andhra Pradesh','Karnataka', 'Assam',....]
"""
r = requests.get(f'{API_URL}/states')
status = r.status_code
if status != 200:
raise Exception(f'failed to fetch states {r.status_code}')
return r.json()['states']
def get_cities(state: str) -> List[str]:
"""
Return a list of cities in given state
Parameters
----------
state : string
Name of the state for which,
the names of cities are required
Examples
--------
Obtaining Cities using state name
>>> cities = client.get_cities(state='Kerala')
>>> cities
['Eloor', 'Ernakulam', 'Kannur', 'Kochi', 'Kollam', 'Kozhikode', 'Thiruvananthapuram']
"""
r = requests.get(f'{API_URL}/state/{state}')
status = r.status_code
if status != 200:
raise Exception(f'failed to fetch cities with status:{status}')
return r.json()['cities']
def get_stations(city: str) -> List[dict]:
"""
Return list of dictionary for each station in a given city
Parameters
----------
city: str
Name of the city for which,
the names of stations and station codes
are required
Examples
--------
Obtaining stations using city name
>>> stations = client.get_stations('Kollam')
>>> stations
[{'id': 'site_5334', 'live': True, 'name': 'Polayathode, Kollam - Kerala PCB'}]
"""
r = requests.get(f'{API_URL}/city/{city}')
status = r.status_code
if status != 200:
raise Exception(f'failed to fetch stations with status:{status}')
return r.json()['stations']
def get_data(from_date: str, to_date: str,
station_id: str, criteria: str) -> pd.DataFrame:
"""
Return a pandas dataframe for selected station in given
time range.
Parameters
----------
from_date : str/ISO datetime
Starting Date from which data is required
to_date : str/ISO datetime
End Date until which the date is required
station_id : str
Station Id as listed in the station dictionary
criteria: str
Frequency of data required
Supported Criteria
- 24 Hours
- 8 Hours
- 4 Hours
- 1 Hours
- 30 Minute
- 15 Minute
Examples
--------
>>> data = client.get_data(from_date='01-01-2020',
to_date='01-01-2020',
criteria='24 Hours',
station_id='site_273')
>>> data
from date AT BP Benzene NO NO2 NOx Ozone PM2.5 RH SO2 SR Toluene VWS WD WS Xylene to date
0 01-Jan-2020 - 00:00 15.57 732.58 8.03 21.52 59.95 30.89 28.57 274.89 76.24 17.72 58.18 15.94 -1.33 188.71 0.76 9.93 02-Jan-2020 - 00:00
"""
payload = {
"from_date": from_date,
"to_date": to_date,
"station_id": station_id,
"criteria": criteria
}
r = requests.post(f'{API_URL}/data', json=payload)
status = r.status_code
if status == 422:
print(r.json())
elif status == 200:
return _format(json_data=r.json())
def _format(json_data: dict) -> pd.DataFrame:
"""
Return a well formatted Dataframe from json
Parameters
----------
json_data: json response from request response
"""
data = json_data['data']
if not data:
raise Exception("API returned empty data")
else:
df = | pd.DataFrame(data=data) | pandas.DataFrame |
""" Function that will write intermediate .dat file for Igor processing"""
import xarray as xr
import pandas as pd
import numpy as np
import datetime
import numpy as np
def write_dat(ds, file_name):
"""
This writes the .dat files that generate the intermediate parameters used
by the Igor processing.
Parameters
----------
ds: xarray Dataset
The dataset containing the processed signal data.
file_name: str
The name of the file to save to.
"""
### Convert the dataset to a Pandas DataFrame
index_label = ["TimeWave", "DateTimeWaveUTC",
"DateTimeWave", "EventIndex",
"Flag", "Base_ch0", "FtAmp_ch0", "FtPos_ch0",
"PkHt_ch0", "PkPos_ch0", "PkFWHM_ch0",
"GaussChiSq_ch0", "GaussErrorCode_ch0",
"PkStart_ch0", "Base_ch1", "PkHt_ch1",
"PkPos_ch1", "PkStart_ch1", "PkEnd_ch1",
"PkHalfRise_ch1", "PkHalfDecay_ch1",
"Peak2area_ch1", "Base_ch2",
"PkHt_ch2", "PkPos_ch2", "PkStart_ch2",
"PkEnd_ch2", "PkHalfRise_ch2", "PkHalfDecay_ch2",
"Peak2area_ch2", "Base_ch3", "PkHt_ch3",
"PkPos_ch3", "PkSplitPos_ch3", "Base_ch4",
"FtAmp_ch4", "FtPos_ch4", "PkHt_ch4",
"PkPos_ch4", "PkFWHM_ch4", "GaussChiSq_ch4",
"GaussErrorCode_ch4", "PkStart_ch4",
"Base_ch5", "PkHt_ch5", "PkPos_ch5",
"PkStart_ch5", "PkEnd_ch5", "PkHalfRise_ch5",
"PkHalfDecay_ch5", "Peak2area_ch5", "Base_ch6",
"PkHt_ch6", "PkPos_ch6", "PkStart_ch6", "PkEnd_ch6",
"PkHalfRise_ch6", "PkHalfDecay_ch6", "Peak2area_ch6",
"Base_ch7", "PkHt_ch7", "PkPos_ch7", "PkSplitPos_ch7",
"IncanRatioch5ch6", "IncanPkOffsetch5ch6",
"IncanRatioch1ch2", "IncanPkOffsetch1ch2",
"ScatRejectKey", "IncanRejectKey"]
drop_list = []
for varname in ds.variables.keys():
if varname not in index_label:
drop_list.append(varname)
smaller_ds = ds.drop(drop_list)
pandas_ds = smaller_ds.to_dataframe()
sp2_header = ["Instrument Type=SP2\n", "****\n"]
# Round some entries to fewer figures
pandas_ds['TimeWave'] = pandas_ds['TimeWave'].map(lambda x: "%.16g" % x)
pandas_ds['DateTimeWave'] = pandas_ds['DateTimeWave'].map(lambda x: "%.16g" % x)
pandas_ds['DateTimeWaveUTC'] = pandas_ds['DateTimeWaveUTC'].map(lambda x: "%.16g" % x)
pandas_ds["GaussChiSq_ch0"] = pandas_ds["GaussChiSq_ch0"].map(lambda x: "%.16g" % x)
pandas_ds["GaussChiSq_ch4"] = pandas_ds["GaussChiSq_ch4"].map(lambda x: "%.16g" % x)
pandas_ds["GaussChiSq_ch0"] = pandas_ds["GaussChiSq_ch0"].replace('nan', '')
pandas_ds["GaussChiSq_ch4"] = pandas_ds["GaussChiSq_ch4"].replace('nan', '')
pandas_ds["GaussErrorCode_ch0"] = pandas_ds["GaussChiSq_ch0"].map(lambda x: x*0)
pandas_ds["GaussErrorCode_ch4"] = pandas_ds["GaussChiSq_ch4"].map(lambda x: x*0)
pandas_ds["IncanRatioch1ch2"] = pandas_ds["IncanRatioch1ch2"].map(lambda x: "%.16g" % x)
pandas_ds["IncanRatioch5ch6"] = pandas_ds["IncanRatioch5ch6"].map(lambda x: "%.16g" % x)
pandas_ds["IncanRatioch1ch2"] = pandas_ds["IncanRatioch1ch2"].replace('nan', '')
pandas_ds["IncanRatioch5ch6"] = pandas_ds["IncanRatioch5ch6"].replace('nan', '')
with open(file_name, 'w', newline='\n') as f:
for line_in_header in sp2_header:
f.write(line_in_header)
pandas_ds = pandas_ds[index_label]
#print(pandas_ds)
pandas_ds.to_csv(f, header=True, index=False, float_format="%.8g", sep='\t', encoding='utf-8')
def write_dat_concs(ds, file_name):
"""
This writes the .dat files for the mass and number concentrations
by the Igor processing.
Parameters
----------
ds: xarray Dataset
The dataset containing the processed signal data.
file_name: str
The name of the file to save to.
"""
pandas_df = | pd.DataFrame() | pandas.DataFrame |
import os, pickle
import pandas as pd
import numpy as np
import seaborn as sns
import statistics
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import missingno as msno
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
import sklearn
from sklearn.feature_selection import SelectPercentile, f_classif
from src.config import Config
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 500)
class Analysis(Config):
def __init__(self):
self.data = {}
def read_file(self, fname=None):
try:
if fname is None:
fname = os.path.join(Config.DATA["INPUT_PATH"])
print("Reading file: {} ...".format(fname))
data = pd.read_csv(fname)
for col in data.columns:
if len(data[col].unique()) < 20 or col in ["12", "64", "95", "target"]:
data[col] = data[col].astype("category")
print("Data import complete for file: {} ...".format(fname))
return data
except FileNotFoundError:
print(fname)
print("File {} is not found ... Please specify the correct path in config.py".format(fname))
def summary_statistics(self, data, dtype):
if dtype == "numerical":
df_stats_num = data.select_dtypes(["float", "int"]).describe()
kurtosis_list = []
skewness_list = []
numerical_column_list = [col for col in df_stats_num]
for col in df_stats_num:
kurtosis_list.append(data[col].kurtosis())
skewness_list.append(data[col].skew())
new_dict_kurtosis = dict(zip(numerical_column_list,kurtosis_list))
new_dict_skewness = dict(zip(numerical_column_list,skewness_list))
new_rows_kurtosis = pd.Series(data = new_dict_kurtosis, name='kurtosis')
new_rows_skewness = pd.Series(data = new_dict_skewness, name='skewness')
# Append the series of kurtosis and skewness to the .describe() dataframe
df_stats_num = df_stats_num.append(new_rows_kurtosis, ignore_index=False)
df_stats_num = df_stats_num.append(new_rows_skewness, ignore_index=False)
if (len(data) > 10):
df_stats_num = pd.DataFrame(df_stats_num.transpose())
# Set skewness and kurtosis type
df_stats_num.loc[df_stats_num['kurtosis'] < 3 , 'kurtosis type'] = 'Platykurtic' # thin tails
df_stats_num.loc[df_stats_num['kurtosis'] == 3 , 'kurtosis type'] = 'Normal - Mesokurtic'
df_stats_num.loc[df_stats_num['kurtosis'] > 3 , 'kurtosis type'] = 'Leptokurtic' # heavy tails
df_stats_num.loc[df_stats_num['skewness'] < 0, 'skewness type'] = 'Negatively Skewed'
df_stats_num.loc[df_stats_num['skewness'] == 0, 'skewness type'] = 'Symmetrical'
df_stats_num.loc[df_stats_num['skewness'] > 0, 'skewness type'] = 'Positively Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > -0.5) & (df_stats_num['skewness'] < 0.5), 'skewness lvl'] \
= 'Fairly Symmetrical'
df_stats_num.loc[(df_stats_num['skewness'] > -1.0) & (df_stats_num['skewness'] < -0.5) , 'skewness lvl'] \
= 'Moderately Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > 0.5) & (df_stats_num['skewness'] < 1.0), 'skewness lvl'] \
= 'Moderately Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > 1.0) | (df_stats_num['skewness'] < -1.0), 'skewness lvl'] \
= 'Highly Skewed'
final_df = df_stats_num
elif dtype == "categorical":
df_stats_cat = data.select_dtypes(["category"]).describe()
if (len(data) > 10):
df_stats_cat = pd.DataFrame(df_stats_cat.transpose())
final_df = df_stats_cat
return final_df
def categorical_barplot(self, data, col, xlabel, title, type="standard"):
fig, ax = plt.subplots(figsize=(15, 5))
if type == "standard":
try:
cat_index = np.unique(data[col], return_counts=True)[0]
cat_df = pd.DataFrame(np.unique(data[col], return_counts=True)[1], index=cat_index)
y = list(cat_df[0])
except:
cat_df = pd.DataFrame(data[col].value_counts())
y = cat_df.iloc[:,0]
x = list(cat_df.index)
elif type == "missing":
x = list(data[col].index)
y = list(data[col])
ax.bar(x, y, color=['grey', 'red', 'green', 'blue', 'cyan'])
for i in range(len(x)):
ax.text(i, y[i], y[i], ha = 'center')
ax.set_title(title, fontsize=14)
ax.set_xlabel(xlabel, fontsize=14)
ax.set_ylabel(col, fontsize=14)
return fig
def data_scaling(self, data):
X = data.loc[:, ~data.columns.isin(['target'])].values
y = data.loc[:,['target']].values
X = pd.DataFrame(StandardScaler().fit_transform(X))
normalized_data= pd.concat([X, pd.DataFrame(y)], axis=1)
return X
def boxplot(self, X, col, start_col, end_col):
if col == 0:
fig, ax = plt.subplots(figsize=(20,8))
sns.boxplot(x="variable", y="value", data=pd.melt(X.iloc[:,:col+11]), ax=ax)
else:
fig, ax = plt.subplots(figsize=(20,8))
sns.boxplot(x="variable", y="value", data=pd.melt(X.iloc[:,start_col:end_col]), ax=ax)
start_col = end_col
end_col = end_col+11
return fig, start_col, end_col
def control_chart(self, data, col, filter=None, type='x'):
if col != "target":
np.random.seed(Config.ANALYSIS_CONFIG["RANDOM_SEED"])
x = data.loc[:,col]
MR = [np.nan]
# Get and append moving ranges
i = 1
for _ in range(1, len(x)):
MR.append(abs(x[i] - x[i-1]))
i += 1
MR = pd.Series(MR)
# Concatenate mR Series with and rename columns
data_plot = pd.concat([x,MR, data.target], axis=1)
data_plot.columns = ["x", "mR", "target"]
if filter is not None:
temp_plot = data_plot[filter:].reset_index(drop=True)
else:
temp_plot = data_plot
# Plot x and mR charts
fig, axs = plt.subplots(1, figsize=(15,7), sharex=True)
# x chart
if type == "x":
xchart = axs.scatter(temp_plot.index, temp_plot['x'], linestyle='-', marker='o', c=temp_plot['target'])
axs.axhline(statistics.mean(data_plot['x']), color='blue')
axs.axhline(statistics.mean(data_plot['x']) + \
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])/1.128, color = 'red', linestyle = 'dashed')
axs.axhline(statistics.mean(data_plot['x']) - \
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])/1.128, color = 'red', linestyle = 'dashed')
axs.set_title('X-chart for column: {}'.format(col))
axs.legend(*xchart.legend_elements())
axs.set(xlabel='Unit', ylabel='Value')
# mR chart
elif type == "mR":
mRchart = axs.scatter(temp_plot.index, temp_plot['mR'], linestyle='-', marker='o', c=temp_plot['target'])
axs.axhline(statistics.mean(data_plot['mR'][1:len(data_plot['mR'])]), color='blue')
axs.axhline(statistics.mean(data_plot['mR'][1:len(data_plot['mR'])]) + \
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])*0.8525, color='red', linestyle ='dashed')
axs.axhline(statistics.mean(data_plot['mR'][1:len(data_plot['mR'])]) -
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])*0.8525, color='red', linestyle ='dashed')
axs.set_ylim(bottom=0)
axs.set_title('mR Chart for column: {}'.format(col))
axs.legend(*mRchart.legend_elements())
axs.set(xlabel='Unit', ylabel='Range')
return fig
def outlier_identification(self, data, selected_cols, mode='feature_engineering'):
for col in selected_cols:
x = data.loc[:,col]
MR = [np.nan]
i = 1
for _ in range(1, len(x)):
MR.append(abs(x[i] - x[i-1]))
i += 1
MR = pd.Series(MR)
temp_data = pd.concat([x, MR, data.target], axis=1)
temp_data.columns = ["x", "mR", "target"]
ucl = statistics.mean(temp_data['x'])+3*statistics.mean(temp_data['mR'][1:len(temp_data['mR'])])/1.128
lcl = statistics.mean(temp_data['x'])-3*statistics.mean(temp_data['mR'][1:len(temp_data['mR'])])/1.128
if mode == 'feature_engineering':
# We flag out the data points that lie outside the ucl and lcl
# Assumption: Target is not available for prediction
data[col+"_flag"] = ((temp_data["x"] < lcl) | (temp_data["x"] > ucl))*1
data[col+"_flag"] = data[col+"_flag"].astype('category')
elif mode == 'outlier_removal':
# Remove outliers if data belongs to majority class
mask = ((temp_data["x"] < lcl) | (temp_data["x"] > ucl)) & (temp_data["target"].astype("int") == 0)
if mask.any():
temp_data.loc[mask,"x"] = np.nan
data[col] = temp_data["x"]
return data
def missingness_heatmap(self, data):
df_missing = data.loc[:, data.isna().any()]
df_missing = df_missing.isna()
missing_cor = df_missing.corr(method='kendall')
mask = np.triu(np.ones_like(missing_cor, dtype=bool))
mask_df = missing_cor.mask(mask)
check = [c for c in mask_df.columns if any(mask_df[c] > 0.1)]
pair = []
correlation = []
if len(check) > 0:
for col in mask_df.columns:
for index in mask_df.index:
if mask_df.loc[index, col] >= 0.4:
pair.append(str(index+" & "+ col))
correlation.append(np.round(mask_df.loc[index, col], 2))
df = pd.DataFrame({'pair': pair, 'correlation': correlation})
df.sort_values(by="correlation", ascending=False, inplace=True)
return df
def missingness_analysis(self, data, type="matrix"):
"""
Display missing data analysis matrix chart and missing data heatmap.
Args:
data (dataframe): Output from read_input()
"""
missing_col = data.isnull().sum()
percent_missing_col = round(missing_col * 100 / len(data), 2)
fig, ax = plt.subplots(figsize=(15, 5))
if type == "matrix":
msno.matrix(data, ax=ax)
elif type == "bar":
msno.bar(data, ax=ax)
return fig
def missingness_class(self, data):
class0 = data.loc[data.target==0]
missing_data_class0 = pd.DataFrame(class0.isna().sum()[class0.isna().sum() != 0], columns=["class_0"])
class1 = data.loc[data.target==1]
missing_data_class1 = pd.DataFrame(class1.isna().sum()[class1.isna().sum() != 0], columns=["class_1"])
class2 = data.loc[data.target==2]
missing_data_class2 = pd.DataFrame(class2.isna().sum()[class2.isna().sum() != 0], columns=["class_2"])
class3 = data.loc[data.target==3]
missing_data_class3 = pd.DataFrame(class3.isna().sum()[class3.isna().sum() != 0], columns=["class_3"])
class4 = data.loc[data.target==4]
missing_data_class4 = pd.DataFrame(class4.isna().sum()[class4.isna().sum() != 0], columns=["class_4"])
final_df = pd.concat([missing_data_class0, missing_data_class1, missing_data_class2, missing_data_class3,\
missing_data_class4], axis=1)
fig, ax = plt.subplots(figsize=(15, 5))
colors = ['grey', 'red', 'green', 'blue', 'cyan']
final_df.plot.bar(stacked=True,
color=colors,
figsize=(10,7),
ax=ax,
title = "Missingness Count by Target Class",
xlabel = "Input Variables",
ylabel= "Missingness Count",
fontsize=14)
return fig
def missingness_correlation(self, data):
high_cor_missing = self.missingness_heatmap(data)
if len(high_cor_missing) > 0:
print('Column pairs with similar pattern of missingness:- \n')
return msno.heatmap(data)
else:
if data.isnull().sum().sum() == 0:
print('There are no missing data in the columns.')
else:
print('There is only one column that has missing data, therefore no coorelation can be done.')
def mice_imputation(self, data):
MICE_imputer = IterativeImputer(random_state=Config.ANALYSIS_CONFIG["RANDOM_SEED"])
imputed_df = MICE_imputer.fit_transform(data)
return imputed_df
def data_transformation(self, data):
summary_numerical = self.summary_statistics(data, "numerical")
filter_data = data.loc[:, ~data.columns.isin(Config.ANALYSIS_CONFIG["BITRIMODAL_DISTRIBUTION"])]
sym_data = data.loc[:, data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] ==\
"Fairly Symmetrical"].index)]
mskew_data = filter_data.loc[:, filter_data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] \
== "Moderately Skewed"].index)]
hskew_data = filter_data.loc[:, filter_data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] \
== "Highly Skewed"].index)]
mpskew_data = mskew_data.loc[:,(mskew_data>=0).all()]
mpskew_tdata = mpskew_data.copy()
for col in mpskew_data.columns:
mpskew_tdata["{}_sqrt".format(col)] = np.sqrt(mpskew_data.loc[:,col])
mnskew_data = mskew_data.loc[:,(mskew_data<0).any()]
mnskew_tdata = mnskew_data.copy()
for col in mnskew_data.columns:
mnskew_tdata["{}_sqrt".format(col)] = np.sqrt(max(mnskew_data.loc[:, col]+1) - mnskew_data.loc[:, col])
hpskew_data = hskew_data.loc[:,(hskew_data>=0).all()]
hpskew_tdata = hpskew_data.copy()
for col in hpskew_data.columns:
hpskew_tdata["{}_log".format(col)] = np.log(hpskew_data.loc[:,col])
hnskew_data = hskew_data.loc[:,(hskew_data<0).any()]
hnskew_tdata = hnskew_data.copy()
for col in hnskew_data.columns:
hnskew_tdata["{}_log".format(col)] = np.log(max(hnskew_data.loc[:, col]+1) - hnskew_data.loc[:, col])
combined_dict = dict(
SYMMETRICAL_DATA = sym_data,
MODPOS_ORIGINAL = mpskew_data,
MODNEG_ORIGINAL = mnskew_data,
HIGHPOS_ORIGINAL = hpskew_data,
HIGHNEG_ORIGINAL = hnskew_data,
MODPOS_TRANSFORMED = mpskew_tdata.loc[:, mpskew_tdata.columns.str.contains("sqrt")],
MODNEG_TRANSFORMED = mnskew_tdata.loc[:, mnskew_tdata.columns.str.contains("sqrt")],
HIGHPOS_TRANSFORMED = hpskew_tdata.loc[:, hpskew_tdata.columns.str.contains("log")],
HIGHNEG_TRANSFORMED = hnskew_tdata.loc[:, hnskew_tdata.columns.str.contains("log")],
TARGET = data[["target"]]
)
combined_df = pd.concat([df for k, df in combined_dict.items()], axis=1)
transform_numerical = self.summary_statistics(combined_df, "numerical")
return combined_dict, transform_numerical
def histogram_plot(self, data, type="before", grid_cols = 5):
if type == "after":
combined_dict, _ = self.data_transformation(data)
mskew_original = | pd.concat([combined_dict["MODPOS_ORIGINAL"], combined_dict["MODNEG_ORIGINAL"]], axis=1) | pandas.concat |
#!/usr/bin/env python
from sklearn.cross_validation import train_test_split
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import Imputer
random_seed=1225
train_x_csv=".../train.csv"
train_master_csv =".../train_master.csv"
test_x_csv =".../test.csv"
#get the y
train_master= | pd.read_csv(train_master_csv) | pandas.read_csv |
def zoneConcentration(shp_gdf, raw, pntLst, bufr=None):
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
import geopandas as gpd
pnt = pntLst[0]
pnt_isCalled = pntLst[1]
for url in pnt:
if url[-3:] == 'zip':
pnt = url
assert isinstance(pnt, str) #Must extract a zipfile from pnt!
#Convenience assignment of projection type
crs='EPSG:4326'
#Extract and read points into memory
pnt = down_extract_zip(pnt)
ftr = gpd.read_file(pnt, crs=crs)
#Flag properties within distance "bufr" of featured locations
if not bufr:
bufr = 1/250 #Hard to say what a good buffer is.
assert isinstance(bufr, float) #buffer must be float!
#Frame up the buffer shapes
ftr.geometry = ftr.geometry.buffer(bufr)
ftr['flag'] = 1
if 'NAME' in ftr:
ftr.drop(['NAME'], axis=1, inplace=True)
#Frame up the raw address points data
pointy = raw[['NAME', 'Points', 'dummy_counter']]
pointy = gpd.GeoDataFrame(pointy, crs=ftr.crs,
geometry=pointy.Points)
pointy = gpd.sjoin(pointy, ftr,
how='left', op='intersects')
denom = pointy.groupby('NAME').sum()
denom = denom.dummy_counter
numer = pointy.groupby('NAME').sum()
numer = numer.flag
pct_ftr_coverage = pd.DataFrame(numer/denom)
pct_ftr_coverage.columns = [
pnt_isCalled
]
pct_ftr_coverage.fillna(0, inplace=True)
pct_ftr_coverage.crs = pointy.crs
shp_gdf = shp_gdf.merge(pct_ftr_coverage,
how="left", left_on='NAME', right_index=True)
del pct_ftr_coverage, raw, pointy, denom, numer
return shp_gdf
del shp_gdf
def pointInZone(shp_gdf, raw, zoneLst):
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
import geopandas as gpd
zone = zoneLst[0]
zone_isCalled = zoneLst[1]
for url in zone:
if url[-3:] == 'zip':
zone = url
assert isinstance(zone, str) #Must extract a zipfile from pnt!
#Convenience assignment of projection type
crs='EPSG:4326'
#Extract and read points into memory
zone = down_extract_zip(zone)
zone = gpd.read_file(zone, crs=crs)
zone['flag'] = 1
if 'NAME' in zone:
zone.drop(['NAME'], axis=1, inplace=True)
#Frame up the raw address points data
pointy = raw[['NAME', 'Points', 'dummy_counter']]
pointy = gpd.GeoDataFrame(pointy, crs=zone.crs,
geometry=pointy.Points)
pointy = gpd.sjoin(pointy, zone,
how='left', op='intersects')
numer = pointy.groupby('NAME').sum()
numer = numer.flag
inzone = pointy.groupby('NAME').sum()
inzone = inzone.dummy_counter #This was calling denom.dummy_counter which is undeclared
flaginzone = pd.DataFrame(inzone)
flaginzone.columns = [
zone_isCalled
]
flaginzone.fillna(0, inplace=True)
flaginzone.crs = pointy.crs
shp_gdf = shp_gdf.merge(flaginzone,
how="left", left_on='NAME', right_index=True)
del flaginzone, pointy, inzone, numer, raw
return shp_gdf
del shp_gdf
def oecdGdpQs(shp_gdf, raw, url, i=None):
#This extracts U.S. GDP on a quarterly
#basis to the correct time unit of analysis
import numpy as np
import pandas as pd
import geopandas as gpd
if not 'Q_GDP' in shp_gdf.columns:
shp_gdf['Q_GDP'] = 0
Qbins = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
yr = round(i)
q = round((i-yr)*100)
assert q < 14
for ij in range(0, 4):
if q in Qbins[ij]:
q = 'Q'+ str(ij+1)
df = pd.read_csv(url[0], encoding='utf-8')
df = df[df.LOCATION == 'USA']
df[['q', 'yr']]= df.Time.str.split('-', expand=True)
df['q'] = df['q'].astype(str)
df['yr'] = df['yr'].astype(int)
df = df[(df.q == q)]
df = df[(df.yr == yr)]
i_gdp = list(df['Value'])
i_gdp = i_gdp[0]
shp_gdf['Q_GDP'][shp_gdf['month']==i] = i_gdp
return shp_gdf
del shp_gdf
def metro_prox(shp_gdf, raw, bufr=None):
#Flag properties within distance "bufr" of metro stations
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
import geopandas as gpd
if not bufr:
bufr = 1/250 #Hard to say what a good buffer is.
assert isinstance(bufr, float) #buffer must be float!
#Frame up the metro buffer shapes
metro = down_extract_zip(
'https://opendata.arcgis.com/datasets/54018b7f06b943f2af278bbe415df1de_52.zip'
)
metro = gpd.read_file(metro, crs=shp_gdf.crs)
metro.geometry = metro.geometry.buffer(bufr)
metro['bymet'] = 1
metro.drop(['NAME'], axis=1, inplace=True)
#Frame up the raw address points data
pointy = raw[['NAME', 'Points', 'dummy_counter']]
pointy = gpd.GeoDataFrame(pointy, crs=metro.crs,
geometry=pointy.Points)
pointy = gpd.sjoin(pointy, metro,
how='left', op='intersects')
denom = pointy.groupby('NAME').sum()
denom = denom.dummy_counter
numer = pointy.groupby('NAME').sum()
numer = numer.bymet
pct_metro_coverage = pd.DataFrame(numer/denom)
pct_metro_coverage.columns = [
'pct_metro_coverage'
]
pct_metro_coverage.fillna(0, inplace=True)
pct_metro_coverage.crs = pointy.crs
shp_gdf = shp_gdf.merge(pct_metro_coverage,
how="left", left_on='NAME', right_index=True)
return shp_gdf
def clim_ingest(shp_gdf, raw, filepath, i=None):
#Adds monthly average, max and min temp, from National Airport
import numpy as np
import pandas as pd
import geopandas as gpd
#NOAA NCDC data mining is not worth implementing in this workflow
#Pull the data from disk
df = pd.read_csv(filepath)
#Only want National Airport
df = df[df.NAME == 'WASHINGTON REAGAN NATIONAL AIRPORT, VA US']
#Express the dates as datetime objects
df.DATE = pd.to_datetime(df.DATE)
yr = round(i)
month = round((i-yr)*100)
#Narrow it down to just the one row that matches "i"
df = df[df.DATE.dt.year == yr]
df = df[df.DATE.dt.month == month]
assert df.shape[0] == 1 #Only one row should match "i"
for tag in ['TAVG', 'TMAX', 'TMIN']: #iterate thru values we want
#Establishes the column if needed
if not tag in shp_gdf.columns:
shp_gdf[tag] = 0
#Extract the value of df[tag]
val = list(df[tag])
val = val[0]
#Assign the extracted value to all shp_gdf[tag] rows where 'month' is t-i
shp_gdf[tag][shp_gdf['month']==i] = val
return shp_gdf
del shp_gdf
def ITSPExtract(shp_gdf, raw, i=None):
"""Read in tax extract data, pare it down to month i,
spatial join on the shape geodataframe shp_gdf. Return shp_gdf.
"""
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
from shapely.geometry import Point, Polygon
import geopandas as gpd
crs='EPSG:4326'
df = pd.read_csv('./data/Integrated_Tax_System_Public_Extract.csv')
df.SALEDATE = pd.to_datetime(df.SALEDATE)
yr = round(i)
month = round((i-yr)*100)
#Narrow it down to just the one row that matches "i"
df = df[df.SALEDATE.dt.year == yr]
df = df[df.SALEDATE.dt.month == month]
df = df.sort_values(['SALEDATE'])
df = df.reset_index(drop=True)
#ITSPE has no geospatial data, so we need to merge on addresspoints.
adr_df = pd.read_csv('./data/Address_Points.csv')
#Regex to clean off the regime codes and any other NaN.
adr_df['SSL'] = adr_df['SSL'].str.replace(r'\D+', '')
df['SSL'] = df['SSL'].str.replace(r'\D+', '')
adr_df = | pd.merge(adr_df, df, how='inner', on=['SSL', 'SSL'], suffixes=['', '_tax']) | pandas.merge |
from contextlib import contextmanager
import pytest
import pandas as pd
from cobra.model_building.models import LogisticRegressionModel, LinearRegressionModel
from cobra.model_building.forward_selection import ForwardFeatureSelection
@contextmanager
def does_not_raise():
yield
def mock_data(add_split_col: bool=False, model_type="classification"):
data = pd.DataFrame({"var1_enc": [0.42] * 10,
"var2_enc": [0.94] * 10,
"var3_enc": [0.87] * 10})
if model_type == "classification":
data["target"] = ([0] * 5 + [1] * 2 + [0] * 2 + [1])
elif model_type == "regression":
data["target"] = [7, 2, 2, 9, 7, 3, 1, 4, 8, 5]
if add_split_col:
data.loc[:, "split"] = (["train"] * 7 + ["selection"] * 3)
return data
def mock_model_num_pred(n_predictors, model_type="classification"):
predictors = [f"var{i + 1}_enc" for i in range(n_predictors)]
return mock_model(predictors, model_type)
def mock_model(predictor_list, model_type="classification"):
if model_type == "classification":
model = LogisticRegressionModel()
elif model_type == "regression":
model = LinearRegressionModel()
model.predictors = predictor_list
return model
class TestForwardFeatureSelection:
def test_get_model_from_step(self):
forward_selection = ForwardFeatureSelection()
with pytest.raises(ValueError):
forward_selection.get_model_from_step(2)
@pytest.mark.parametrize("model_type", ["classification", "regression"])
def test_compute_model_performances(self, mocker, model_type):
data = mock_data(add_split_col=True, model_type=model_type)
fw_selection = ForwardFeatureSelection(model_type=model_type)
fw_selection._fitted_models = [
mock_model_num_pred(1, model_type=model_type),
mock_model_num_pred(2, model_type=model_type),
mock_model_num_pred(3, model_type=model_type)
]
def mock_evaluate(self, X, y, split, metric): # on AUC scale, but gives the same for RMSE as it is a mock
if split == "train":
return 0.612
else:
return 0.609
if model_type == "classification":
patch_fct = "cobra.model_building.forward_selection.LogisticRegressionModel.evaluate"
elif model_type == "regression":
patch_fct = "cobra.model_building.forward_selection.LinearRegressionModel.evaluate"
mocker.patch(patch_fct, mock_evaluate)
actual = (fw_selection
.compute_model_performances(data, "target",
splits=["train", "selection"],
metric=None))
expected = pd.DataFrame([
{"predictors": ["var1_enc"],
"last_added_predictor": "var1_enc",
"train_performance": 0.612, "selection_performance": 0.609,
"model_type": model_type},
{"predictors": ["var1_enc", "var2_enc"],
"last_added_predictor": "var2_enc",
"train_performance": 0.612, "selection_performance": 0.609,
"model_type": model_type},
{"predictors": ["var1_enc", "var2_enc", "var3_enc"],
"last_added_predictor": "var3_enc",
"train_performance": 0.612, "selection_performance": 0.609,
"model_type": model_type}
])
pd.testing.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("model_type", ["classification", "regression"])
def test_ffs_train_data_assertions(self, model_type):
fw_selection = ForwardFeatureSelection(model_type=model_type)
with pytest.raises(AssertionError): # no split column
fw_selection.fit(pd.DataFrame(), "target", predictors=[""])
df = mock_data(add_split_col=True, model_type=model_type)
with pytest.raises(AssertionError): # not at least train & selection sets
fw_selection.fit(df[df["split"] == "train"], "target", predictors=[""])
@pytest.mark.parametrize("model_type, max_predictors, expectation",
[("classification", 2, pytest.raises(ValueError)),
("classification", 3, does_not_raise()),
("classification", 5, does_not_raise()),
("classification", 10, does_not_raise()),
("classification", 15, does_not_raise()),
("regression", 2, pytest.raises(ValueError)),
("regression", 3, does_not_raise()),
("regression", 5, does_not_raise()),
("regression", 10, does_not_raise()),
("regression", 15, does_not_raise())
])
def test_fit(self, mocker, model_type, max_predictors: int, expectation):
# create list of elements [var1_enc, var2_enc, ..., var10_enc]
predictors_list = [f"var{i+1}_enc" for i in range(10)]
# extract sublist [var1_enc, var5_enc, var9_enc]
forced_predictors_list = predictors_list[::4]
ordered_output_list = (forced_predictors_list
+ [pred for pred in predictors_list
if pred not in forced_predictors_list])
fw_selection = ForwardFeatureSelection(model_type=model_type, max_predictors=max_predictors)
def mock_train_model(self, train_data, target_column_name, predictors):
return mock_model(predictors, model_type=model_type)
def mock_forward_selection(self, train_data, target_column_name,
predictors, forced_predictors):
n_models = min(max_predictors, len(predictors) + len(forced_predictors))
return [mock_model(ordered_output_list[:i+1], model_type=model_type)
for i in range(n_models)]
mocker.patch("cobra.model_building.ForwardFeatureSelection._train_model",
mock_train_model)
mocker.patch("cobra.model_building.ForwardFeatureSelection._forward_selection",
mock_forward_selection)
df = mock_data(add_split_col=True, model_type=model_type)
with expectation:
fw_selection.fit(df, "target", # data is ignored
predictors=predictors_list,
forced_predictors=forced_predictors_list,
excluded_predictors=[])
# for each fitted model, check number of predictors
actual = [model.predictors
for model in fw_selection._fitted_models]
expected = [ordered_output_list[:i+1]
for i in range(min(max_predictors,
len(predictors_list)))]
if max_predictors == len(forced_predictors_list):
expected = [forced_predictors_list]
assert actual == expected
@pytest.mark.parametrize("model_type, max_predictors", [("classification", 5),
("classification", 10),
("classification", 15),
("regression", 5),
("regression", 10),
("regression", 15)
])
def test_forward_selection(self, mocker, model_type, max_predictors: int):
# create list of elements [var1_enc, var2_c, ..., var10_enc]
predictors_list = [f"var{i+1}_enc" for i in range(10)]
# extract sublist [var1_enc, var5_enc, var9_enc]:
forced_predictors = predictors_list[::4]
# remove these from predictors list to have clean version
predictors = [pred for pred in predictors_list
if pred not in forced_predictors]
ordered_output_list = forced_predictors + predictors
def mock_find_next_best_model(self, train_data, target_column_name,
candidate_predictors,
current_predictors):
return mock_model(current_predictors + candidate_predictors[0:1], model_type=model_type)
mocker.patch(("cobra.model_building.ForwardFeatureSelection."
"_find_next_best_model"), mock_find_next_best_model)
fw_selection = ForwardFeatureSelection(model_type=model_type, max_predictors=max_predictors)
fitted_models = (fw_selection.
_forward_selection( | pd.DataFrame() | pandas.DataFrame |
"""Main module."""
import json
import logging
import click
import os
import sys
from datetime import datetime
from typing import List
from distutils.util import strtobool
import geofeather as gf
import geopandas as gpd
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
import requests
import xarray as xr
from osgeo import gdal, gdalconst
from shapely import speedups
from shapely.geometry import Point
from pathlib import Path
import pkg_resources
import fuzzywuzzy
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import timeit
#from .spacetag_schema import SpaceModel
import click
# Constants
COL_ORDER = [
"timestamp",
"country",
"admin1",
"admin2",
"admin3",
"lat",
"lng",
"feature",
"value",
]
GEO_TYPE_COUNTRY = "country"
GEO_TYPE_ADMIN1 = "state/territory"
GEO_TYPE_ADMIN2 = "county/district"
GEO_TYPE_ADMIN3 = "municipality/town"
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
logger = logging.getLogger(__name__)
def audit_renamed_col_dict(dct: dict) -> dict:
"""
Description
-----------
Handle edge cases where a col could be renamed back to itself.
example: no primary_geo, but country is present. Because it is a protected
col name, it would be renamed country_non_primary. Later, it would be set
as primary_geo country, and the pair added to renamed_col_dict again:
{'country_non_primary' : ['country'], "country": ['country_non_primary'] }
Parameters
----------
dct: dict
renamed_col_dict of key: new column name, value: list of old columns
Output
------
dict:
The modified parameter dict.
"""
remove_these = set()
for k, v in dct.items():
vstr = "".join(v)
if vstr in dct.keys() and [k] in dct.values():
remove_these.add(vstr)
remove_these.add(k)
for k in remove_these:
dct.pop(k, None)
return dct
def build_date_qualifies_field(qualified_col_dict: dict, assoc_fields: list) -> str:
"""
Description
-----------
Handle edge case of each date field in assoc_fields qualifying the same
column e.g. day/month/year are associated and qualify a field. In this
case, the new_column_name.
if assoc_fields is found as a value in qualified_col_dict, return the key
Parameters
----------
qualified_col_dict: dict
{'pop': ['month_column', 'day_column', 'year_column']}
assoc_fields: list
['month_column', 'day_column', 'year_column']
"""
for k, v in qualified_col_dict.items():
if v == assoc_fields:
return k
return None
def format_time(t: str, time_format: str, validate: bool = True) -> int:
"""
Description
-----------
Converts a time feature (t) into epoch time using `time_format` which is a strftime definition
Parameters
----------
t: str
the time string
time_format: str
the strftime format for the string t
validate: bool, default True
whether to error check the time string t. Is set to False, then no error is raised if the date fails to parse, but None is returned.
Examples
--------
>>> epoch = format_time('5/12/20 12:20', '%m/%d/%y %H:%M')
"""
try:
t_ = int(datetime.strptime(t, time_format).timestamp()) * 1000 # Want milliseonds
return t_
except Exception as e:
if t.endswith(' 00:00:00'):
# Depending on the date format, pandas.read_excel will read the
# date as a Timestamp, so here it is a str with format
# '2021-03-26 00:00:00'. For now, handle this single case until
# there is time for a more comprehensive solution e.g. add a custom
# date_parser function that doesn't parse diddly/squat to
# pandas.read_excel() in process().
return format_time(t.replace(' 00:00:00', ''), time_format, validate)
print(e)
if validate:
raise Exception(e)
else:
return None
def geocode(
admin: str, df: pd.DataFrame, x: str = "longitude", y: str = "latitude", gadm: gpd.GeoDataFrame = None,
df_geocode: pd.DataFrame = pd.DataFrame()
) -> pd.DataFrame:
"""
Description
-----------
Takes a dataframe containing coordinate data and geocodes it to GADM (https://gadm.org/)
GEOCODES to ADMIN 0, 1, 2 OR 3 LEVEL
Parameters
----------
admin: str
the level to geocode to. 'admin0' to 'admin3'
df: pd.DataFrame
a pandas dataframe containing point data
x: str, default 'longitude'
the name of the column containing longitude information
y: str, default 'latitude'
the name of the column containing latitude data
gadm: gpd.GeoDataFrame, default None
optional specification of a GeoDataFrame of GADM shapes of the appropriate
level (admin2/3) for geocoding
df_geocode: pd.DataFrame, default pd.DataFrame()
cached lat/long geocode library
Examples
--------
Geocoding a dataframe with columns named 'lat' and 'lon'
>>> df = geocode(df, x='lon', y='lat')
"""
flag = speedups.available
if flag == True:
speedups.enable()
cdir = os.path.expanduser("~")
download_data_folder = f"{cdir}/mixmasta_data"
# Only load GADM if it wasn't explicitly passed to the function.
if gadm is not None:
logging.info("GADM geo dataframe has been provided.")
else:
logging.info("GADM has not been provided; loading now.")
if admin in ['admin0','country']:
gadm_fn = f"gadm36_2.feather"
gadmDir = f"{download_data_folder}/{gadm_fn}"
gadm = gf.from_geofeather(gadmDir)
gadm["country"] = gadm["NAME_0"]
gadm = gadm[["geometry", "country"]]
elif admin == "admin1":
gadm_fn = f"gadm36_2.feather"
gadmDir = f"{download_data_folder}/{gadm_fn}"
gadm = gf.from_geofeather(gadmDir)
gadm["country"] = gadm["NAME_0"]
#gadm["state"] = gadm["NAME_1"]
gadm["admin1"] = gadm["NAME_1"]
#gadm = gadm[["geometry", "country", "state", "admin1"]]
gadm = gadm[["geometry", "country", "admin1"]]
elif admin == "admin2":
gadm_fn = f"gadm36_2.feather"
gadmDir = f"{download_data_folder}/{gadm_fn}"
gadm = gf.from_geofeather(gadmDir)
gadm["country"] = gadm["NAME_0"]
#gadm["state"] = gadm["NAME_1"]
gadm["admin1"] = gadm["NAME_1"]
gadm["admin2"] = gadm["NAME_2"]
#gadm = gadm[["geometry", "country", "state", "admin1", "admin2"]]
gadm = gadm[["geometry", "country", "admin1", "admin2"]]
elif admin == "admin3":
gadm_fn = f"gadm36_3.feather"
gadmDir = f"{download_data_folder}/{gadm_fn}"
gadm = gf.from_geofeather(gadmDir)
gadm["country"] = gadm["NAME_0"]
#gadm["state"] = gadm["NAME_1"]
gadm["admin1"] = gadm["NAME_1"]
gadm["admin2"] = gadm["NAME_2"]
gadm["admin3"] = gadm["NAME_3"]
#gadm = gadm[["geometry", "country", "state", "admin1", "admin2", "admin3"]]
gadm = gadm[["geometry", "country", "admin1", "admin2", "admin3"]]
start_time = timeit.default_timer()
# 1) Drop x,y duplicates from data frame.
df_drop_dup_geo = df[[x,y]].drop_duplicates(subset=[x,y])
# 2) Get x,y not in df_geocode.
if not df_geocode.empty and not df_drop_dup_geo.empty:
df_drop_dup_geo = df_drop_dup_geo.merge(df_geocode, on=[x,y], how='left', indicator=True)
df_drop_dup_geo = df_drop_dup_geo[ df_drop_dup_geo['_merge'] == 'left_only']
df_drop_dup_geo = df_drop_dup_geo[[x,y]]
if not df_drop_dup_geo.empty:
# dr_drop_dup_geo contains x,y not in df_geocode; so, these need to be
# geocoded and added to the df_geocode library.
# 3) Apply Point() to create the geometry col.
df_drop_dup_geo.loc[:, "geometry"] = df_drop_dup_geo.apply(lambda row: Point(row[x], row[y]), axis=1)
# 4) Sjoin unique geometries with GADM.
gdf = gpd.GeoDataFrame(df_drop_dup_geo)
# Spatial merge on GADM to obtain admin areas.
gdf = gpd.sjoin(gdf, gadm, how="left", op="within", lsuffix="mixmasta_left", rsuffix="mixmasta_geocoded")
del gdf["geometry"]
del gdf["index_mixmasta_geocoded"]
# 5) Add the new geocoding to the df_geocode lat/long geocode library.
if not df_geocode.empty:
df_geocode = df_geocode.append(gdf)
else:
df_geocode = gdf
# 6) Merge df and df_geocode on x,y
gdf = df.merge(df_geocode, how='left', on=[x,y])
return pd.DataFrame(gdf), df_geocode
def generate_column_name(field_list: list) -> str:
"""
Description
-----------
Contatenate a list of column fields into a single column name.
Parameters
----------
field_list: list[str] of column names
Returns
-------
str: new column name
"""
return ''.join(sorted(field_list))
def generate_timestamp_column(df: pd.DataFrame, date_mapper: dict, column_name: str) -> pd.DataFrame:
"""
Description
-----------
Efficiently add a new timestamp column to a dataframe. It avoids the use of df.apply
which appears to be much slower for large dataframes. Defaults to 1/1/1970 for
missing day/month/year values.
Parameters
----------
df: pd.DataFrame
our data
date_mapper: dict
a schema mapping (JSON) for the dataframe filtered for "date_type" equal to
Day, Month, or Year. The format is screwy for our purposes here and could
be reafactored.
column_name: str
name of the new column e.g. timestamp for primary_time, year1month1day1
for a concatneated name from associated date fields.
Examples
--------
This example adds the generated series to the source dataframe.
>>> df = df.join(df.apply(generate_timestamp, date_mapper=date_mapper,
column_name="year1month1day", axis=1))
"""
# Identify which date values are passed.
dayCol = None
monthCol = None
yearCol = None
for kk, vv in date_mapper.items():
if vv and vv["date_type"] == "day":
dayCol = kk
elif vv and vv["date_type"] == "month":
monthCol = kk
elif vv and vv["date_type"] == "year":
yearCol = kk
# For missing date values, add a column to the dataframe with the default
# value, then assign that to the day/month/year var. If the dataframe has
# the date value, assign day/month/year to it after casting as a str.
if dayCol:
day = df[dayCol].astype(str)
else:
df.loc[:, 'day_generate_timestamp_column'] = "1"
day = df['day_generate_timestamp_column']
if monthCol:
month = df[monthCol].astype(str)
else:
df.loc[:, 'month_generate_timestamp_column'] = "1"
month = df['month_generate_timestamp_column']
if yearCol:
year = df[yearCol].astype(str)
else:
df.loc[:, 'year_generate_timestamp_column'] = "01"
year = df['year_generate_timestamp_column']
# Add the new column
df.loc[:, column_name] = month + '/' + day + '/' + year
# Delete the temporary columns
if not dayCol:
del(df['day_generate_timestamp_column'])
if not monthCol:
del(df['month_generate_timestamp_column'])
if not yearCol:
del(df['year_generate_timestamp_column'])
return df
def generate_timestamp_format(date_mapper: dict) -> str:
"""
Description
-----------
Generates a the time format for day,month,year dates based on each's
specified time_format.
Parameters
----------
date_mapper: dict
a dictionary for the schema mapping (JSON) for the dataframe filtered
for "date_type" equal to Day, Month, or Year.
Output
------
e.g. "%m/%d/%Y"
"""
day = "%d"
month = "%m"
year = "%y"
for kk, vv in date_mapper.items():
if vv["date_type"] == "day":
day = vv["time_format"]
elif vv["date_type"] == "month":
month = vv["time_format"]
elif vv["date_type"] == "year":
year = vv["time_format"]
return str.format("{}/{}/{}", month, day, year)
def get_iso_country_dict(iso_list: list) -> dict:
"""
Description
-----------
iso2 or iso3 is used as primary_geo and therefore the country column.
Load the custom iso lookup table and return a dictionary of the iso codes
as keys and the country names as values. Assume all list items are the same
iso type.
Parameters
----------
iso_list:
list of iso2 or iso3 codes
Returns
-------
dict:
key: iso code; value: country name
"""
dct = {}
if iso_list:
iso_df = pd.DataFrame
try:
# The necessary code to load from pkg doesn't currently work in VS
# Code Debug, so wrap in try/except.
#iso_df = pd.read_csv(pkg_resources.resource_stream(__name__, 'data/iso_lookup.csv'))
with pkg_resources.resource_stream(__name__, 'data/iso_lookup.csv') as f:
iso_df = pd.read_csv(f)
#path = Path(__file__).parent / "data/iso_lookup.csv"
#iso_df = pd.read_csv(path)
except:
# Local VS Code load.
path = Path(__file__).parent / "data/iso_lookup.csv"
iso_df = pd.read_csv(path)
if iso_df.empty:
return dct
if len(iso_list[0]) == 2:
for iso in iso_list:
if iso in iso_df["iso2"].values:
dct[iso] = iso_df.loc[iso_df["iso2"] == iso]["country"].item()
else:
for iso in iso_list:
if iso in iso_df["iso3"].values:
dct[iso] = iso_df.loc[iso_df["iso3"] == iso]["country"].item()
return dct
def handle_colname_collisions(df: pd.DataFrame, mapper: dict, protected_cols: list) -> (pd.DataFrame, dict, dict):
"""
Description
-----------
Identify mapper columns that match protected column names. When found,
update the mapper and dataframe, and keep a dict of these changes
to return to the caller e.g. SpaceTag.
Parameters
----------
df: pd.DataFrame
submitted data
mapper: dict
a dictionary for the schema mapping (JSON) for the dataframe.
protected_cols: list
protected column names i.e. timestamp, country, admin1, feature, etc.
Output
------
pd.DataFame:
The modified dataframe.
dict:
The modified mapper.
dict:
key: new column name e.g. "day1month1year1" or "country_non_primary"
value: list of old column names e.g. ['day1','month1','year1'] or ['country']
"""
# Get names of geo fields that collide and are not primary_geo = True
non_primary_geo_cols = [d["name"] for d in mapper["geo"] if d["name"] in protected_cols and ("primary_geo" not in d or d["primary_geo"] == False)]
# Get names of date fields that collide and are not primary_date = True
non_primary_time_cols = [d['name'] for d in mapper['date'] if d["name"] in protected_cols and ('primary_date' not in d or d['primary_date'] == False)]
# Only need to change a feature column name if it qualifies another field,
# and therefore will be appended as a column to the output.
feature_cols = [d["name"] for d in mapper['feature'] if d["name"] in protected_cols and "qualifies" in d and d["qualifies"]]
# Verbose build of the collision_list, could have combined above.
collision_list = non_primary_geo_cols + non_primary_time_cols + feature_cols
# Bail if no column name collisions.
if not collision_list:
return df, mapper, {}
# Append any collision columns with the following suffix.
suffix = "_non_primary"
# Build output dictionary and update df.
renamed_col_dict = {}
for col in collision_list:
df.rename(columns={col: col + suffix}, inplace=True)
renamed_col_dict[col + suffix] = [col]
# Update mapper
for k, vlist in mapper.items():
for dct in vlist:
if dct["name"] in collision_list:
dct["name"] = dct["name"] + suffix
elif "qualifies" in dct and dct["qualifies"]:
# change any instances of this column name qualified by another field
dct["qualifies"] = [w.replace(w, w + suffix) if w in collision_list else w for w in dct["qualifies"] ]
elif "associated_columns" in dct and dct["associated_columns"]:
# change any instances of this column name in an associated_columns dict
dct["associated_columns"] = {k: v.replace(v, v + suffix) if v in collision_list else v for k, v in dct["associated_columns"].items() }
return df, mapper, renamed_col_dict
def match_geo_names(admin: str, df: pd.DataFrame, resolve_to_gadm_geotypes: list, gadm: gpd.GeoDataFrame = None) -> pd.DataFrame:
"""
Assumption
----------
Country was selected by drop-down on file submission, column "country"
is present in the data frame, and lng/lat is not being used for geocoding.
Parameters
----------
admin: str
the level to geocode to. Either 'admin2' or 'admin3'
df: pandas.DataFrame
the uploaded dataframe
resolve_to_gadm_geotypes:
list of geotypes marked resolve_to_gadm = True e.g. ["admin1", "country"]
gadm: gpd.GeoDataFrame, default None
optional specification of a GeoDataFrame of GADM shapes of the appropriate
level (admin2/3) for geocoding
Result
------
A pandas.Dataframe produced by modifying the parameter df.
"""
print('geocoding ...')
flag = speedups.available
if flag == True:
speedups.enable()
cdir = os.path.expanduser("~")
download_data_folder = f"{cdir}/mixmasta_data"
# only load GADM if it wasn't explicitly passed to the function.
if gadm is not None:
#logging.info("GADM geo dataframe has been provided.")
pass
else:
logging.info("GADM has not been provided; loading now.")
if admin == "admin2":
gadm_fn = f"gadm36_2.feather"
else:
gadm_fn = f"gadm36_3.feather"
gadmDir = f"{download_data_folder}/{gadm_fn}"
gadm = gf.from_geofeather(gadmDir)
gadm["country"] = gadm["NAME_0"]
gadm["state"] = gadm["NAME_1"]
gadm["admin1"] = gadm["NAME_1"]
gadm["admin2"] = gadm["NAME_2"]
if admin == "admin2":
gadm = gadm[["country", "state", "admin1", "admin2"]]
else:
gadm["admin3"] = gadm["NAME_3"]
gadm = gadm[["country", "state", "admin1", "admin2", "admin3"]]
# Filter GADM for countries in df.
countries = df["country"].unique()
# Correct country names.
if GEO_TYPE_COUNTRY in resolve_to_gadm_geotypes:
gadm_country_list = gadm["country"].unique()
unknowns = df[~df.country.isin(gadm_country_list)].country.tolist()
for unk in unknowns:
match = fuzzywuzzy.process.extractOne(unk, gadm_country_list, scorer=fuzz.partial_ratio)
if match != None:
df.loc[df.country == unk, 'country'] = match[0]
# Filter GADM dicitonary for only those countries (ie. speed up)
gadm = gadm[gadm["country"].isin(countries)]
# Loop by country using gadm dict filtered for that country.
for c in countries:
# The following ignores admin1 / admin2 pairs; it only cares if those
# values exist for the appropriate country.
# Get list of admin1 values in df but not in gadm. Reduce list for country.
if GEO_TYPE_ADMIN1 in resolve_to_gadm_geotypes:
admin1_list = gadm[gadm.country==c]["admin1"].unique()
if admin1_list is not None and all(admin1_list) and 'admin1' in df:
unknowns = df[(df.country == c) & ~df.admin1.isin(admin1_list)].admin1.tolist()
unknowns = [x for x in unknowns if pd.notnull(x) and x.strip()] # remove Nan
for unk in unknowns:
match = fuzzywuzzy.process.extractOne(unk, admin1_list, scorer=fuzz.partial_ratio)
if match != None:
df.loc[df.admin1 == unk, 'admin1'] = match[0]
# Get list of admin2 values in df but not in gadm. Reduce list for country.
if GEO_TYPE_ADMIN2 in resolve_to_gadm_geotypes:
admin2_list = gadm[gadm.country==c ]["admin2"].unique()
if admin2_list is not None and all(admin2_list) and 'admin2' in df:
unknowns = df[(df.country == c) & ~df.admin2.isin(admin2_list)].admin2.tolist()
unknowns = [x for x in unknowns if pd.notnull(x) and x.strip()] # remove Nan
for unk in unknowns:
match = fuzzywuzzy.process.extractOne(unk, admin2_list, scorer=fuzz.partial_ratio)
if match != None:
df.loc[df.admin2 == unk, 'admin2'] = match[0]
if admin =='admin3' and GEO_TYPE_ADMIN3 in resolve_to_gadm_geotypes:
# Get list of admin3 values in df but not in gadm. Reduce list for country.
admin3_list = gadm[gadm.country==c]["admin3"].unique()
if admin3_list is not None and all(admin3_list) and 'admin3' in df:
unknowns = df[(df.country == c) & ~df.admin3.isin(admin3_list)].admin3.tolist()
unknowns = [x for x in unknowns if pd.notnull(x) and x.strip()] # remove Nan
for unk in unknowns:
match = fuzzywuzzy.process.extractOne(unk, admin3_list, scorer=fuzz.partial_ratio)
if match != None:
df.loc[df.admin3 == unk, 'admin3'] = match[0]
return df
def netcdf2df(netcdf: str) -> pd.DataFrame:
"""
Produce a dataframe from a NetCDF4 file.
Parameters
----------
netcdf: str
Path to the netcdf file
Returns
-------
DataFrame
The resultant dataframe
"""
try:
ds = xr.open_dataset(netcdf)
except:
raise AssertionError(f"improperly formatted netCDF file ({netcdf})")
data = ds.to_dataframe()
df = data.reset_index()
return df
def normalizer(df: pd.DataFrame, mapper: dict, admin: str, gadm: gpd.GeoDataFrame = None, df_geocode: pd.DataFrame = pd.DataFrame()) -> (pd.DataFrame, dict, pd.DataFrame):
"""
Description
-----------
Converts a dataframe into a CauseMos compliant format.
Parameters
----------
df: pd.DataFrame
a pandas dataframe containing point data
mapper: dict
a schema mapping (JSON) for the dataframe
a dict where keys will be geo, feaure, date, and values will be lists of dict
example:
{ 'geo': [
{'name': 'country', 'type': 'geo', 'geo_type': 'country', 'primary_geo': False},
{'name': 'state', 'type': 'geo', 'geo_type': 'state/territory', 'primary_geo': False}
],
'feature': [
{'name': 'probabilty', 'type': 'feature', 'feature_type': 'float'},
{'name': 'color', 'type': 'feature', 'feature_type': 'str'}
],
'date': [
{'name': 'date_2', 'type': 'date', 'date_type': 'date', 'primary_date': False, 'time_format': '%m/%d/%y'},
{'name': 'date', 'type': 'date', 'date_type': 'date', 'primary_date': True, 'time_format': '%m/%d/%y'}
]
}
admin: str, default 'admin2'
the level to geocode to. Either 'admin2' or 'admin3'
gadm: gpd.GeoDataFrame, default None
optional specification of a GeoDataFrame of GADM shapes of the appropriate
level (admin2/3) for geocoding
df_gecode: pd.DataFrame, default pd.DataFrame()
lat,long geocode lookup library
Returns
-------
pd.DataFrame: CauseMos compliant format ready to be written to parquet.
dict: dictionary of modified column names; used by SpaceTag
pd.DataFRame: update lat,long geocode looup library
Examples
--------
>>> df_norm = normalizer(df, mapper, 'admin3')
"""
col_order = COL_ORDER.copy()
required_cols = [
"timestamp",
"country",
"admin1",
"admin2",
"admin3",
"lat",
"lng",
]
# List of date_types that be used to build a date.
MONTH_DAY_YEAR = ["day","month","year"]
# Create a dictionary of list: colnames: new col name, and modify df and
# mapper for any column name collisions.
df, mapper, renamed_col_dict = handle_colname_collisions(df, mapper, col_order)
### mapper is a dictionary of lists of dictionaries.
click.echo("Raw dataframe:")
click.echo(df.head())
# list of names of datetime columns primary_date=True
primary_time_cols = [k['name'] for k in mapper['date'] if 'primary_date' in k and k['primary_date'] == True]
# list of names of datetime columns no primary_date or primary_date = False
other_time_cols = [k['name'] for k in mapper['date'] if 'primary_date' not in k or k['primary_date'] == False]
# list of names of geo columns primary_geo=True
primary_geo_cols = [k["name"] for k in mapper["geo"] if "primary_geo" in k and k["primary_geo"] == True]
# list of geotypes of geo columns primary_geo=True (used for match_geo_names logic below)
primary_geo_types = [k["geo_type"] for k in mapper["geo"] if "primary_geo" in k and k["primary_geo"] == True]
# qualified_col_dict: dictionary for columns qualified by another column.
# key: qualified column
# value: list of columns that qualify key column
qualified_col_dict = {}
# subset dataframe for only columns specified in mapper schema.
# get all named objects in the date, feature, geo schema lists.
mapper_keys = []
for k in mapper.items():
mapper_keys.extend([l['name'] for l in k[1] if 'name' in l])
df = df[mapper_keys]
# Rename protected columns
# and perform type conversion on the time column
features = []
primary_date_group_mapper = {}
other_date_group_mapper = {}
for date_dict in mapper["date"]:
kk = date_dict["name"]
if kk in primary_time_cols:
# There should only be a single epoch or date field, or a single
# group of year/month/day/minute/second marked as primary_time in
# the loaded schema.
if date_dict["date_type"] == "date":
# convert primary_time of date_type date to epochtime and rename as 'timestamp'
df.loc[:, kk] = df[kk].apply(lambda x: format_time(str(x), date_dict["time_format"], validate=False))
staple_col_name = "timestamp"
df.rename(columns={kk: staple_col_name}, inplace=True)
# renamed_col_dict[ staple_col_name ] = [kk] # 7/2/2021 do not include primary cols
elif date_dict["date_type"] == "epoch":
# rename epoch time column as 'timestamp'
staple_col_name = "timestamp"
df.rename(columns={kk: staple_col_name}, inplace=True)
#renamed_col_dict[ staple_col_name ] = [kk] # 7/2/2021 do not include primary cols
elif date_dict["date_type"] in ["day","month","year"]:
primary_date_group_mapper[kk] = date_dict
else:
if date_dict["date_type"] == "date":
# Convert all date/time to epoch time if not already.
df.loc[:, kk] = df[kk].apply(lambda x: format_time(str(x), date_dict["time_format"], validate=False))
# If three are no assigned primary_time columns, make this the
# primary_time timestamp column, and keep as a feature so the
# column_name meaning is not lost.
if not primary_time_cols and not "timestamp" in df.columns:
df.rename(columns={kk: "timestamp"}, inplace=True)
staple_col_name ="timestamp"
renamed_col_dict[ staple_col_name ] = [kk]
# All not primary_time, not associated_columns fields are pushed to features.
features.append(kk)
elif date_dict["date_type"] in MONTH_DAY_YEAR and 'associated_columns' in date_dict and date_dict["associated_columns"]:
# Various date columns have been associated by the user and are not primary_date.
# convert them to epoch then store them as a feature
# (instead of storing them as separate uncombined features).
# handle this dict after iterating all date fields
other_date_group_mapper[kk] = date_dict
else:
features.append(kk)
if "qualifies" in date_dict and date_dict["qualifies"]:
# Note that any "qualifier" column that is not primary geo/date
# will just be lopped on to the right as its own column. It's
# column name will just be the name and Uncharted will deal with
# it. The key takeaway is that qualifier columns grow the width,
# not the length of the dataset.
# Want to add the qualified col as the dictionary key.
# e.g. "name": "region", "qualifies": ["probability", "color"]
# should produce two dict entries for prob and color, with region
# in a list as the value for both.
for k in date_dict["qualifies"]:
if k in qualified_col_dict:
qualified_col_dict[k].append(kk)
else:
qualified_col_dict[k] = [kk]
if primary_date_group_mapper:
# Applied when there were primary_date year,month,day fields above.
# These need to be combined
# into a date and then epoch time, and added as the timestamp field.
# Create a separate df of the associated date fields. This avoids
# pandas upcasting the series dtypes on df.apply(); e.g., int to float,
# or a month 9 to 9.0, which breaks generate_timestamp()
assoc_fields = primary_date_group_mapper.keys()
date_df = df[ assoc_fields ]
# Now generate the timestamp from date_df and add timestamp col to df.
df = generate_timestamp_column(df, primary_date_group_mapper, "timestamp")
# Determine the correct time format for the new date column, and
# convert to epoch time.
time_formatter = generate_timestamp_format(primary_date_group_mapper)
df['timestamp'] = df["timestamp"].apply(lambda x: format_time(str(x), time_formatter, validate=False))
# Let SpaceTag know those date columns were renamed to timestamp.
#renamed_col_dict[ "timestamp" ] = assoc_fields # 7/2/2021 do not include primary cols
while other_date_group_mapper:
# Various date columns have been associated by the user and are not primary_date.
# Convert to epoch time and store as a feature, do not store these separately in features.
# Exception is the group is only two of day, month, year: leave as date.
# Control for possibility of more than one set of assciated_columns.
# Pop the first item in the mapper and begin building that date set.
date_field_tuple = other_date_group_mapper.popitem()
# Build a list of column names associated with the the popped date field.
assoc_fields = [k[1] for k in date_field_tuple[1]['associated_columns'].items()]
# Pop those mapper objects into a dict based on the column name keys in
# assocfields list.
assoc_columns_dict = { f : other_date_group_mapper.pop(f) for f in assoc_fields if f in other_date_group_mapper }
# Add the first popped tuple into the assoc_columns dict where the key is the
# first part of the tuple; the value is the 2nd part.
assoc_columns_dict[date_field_tuple[0]] = date_field_tuple[1]
# Add the first popped tuple column name to the list of associated fields.
assoc_fields.append(date_field_tuple[0])
# TODO: If day and year are associated to each other and month, but
# month is not associated to those fields, then at this point assoc_fields
# will be the three values, and assoc_columns will contain only day and
# year. This will error out below. It is assumed that SpaceTag will
# control for this instance.
# If there is no primary_time column for timestamp, which would have
# been created above with primary_date_group_mapper, or farther above
# looping mapper["date"], attempt to generate from date_type = Month,
# Day, Year features. Otherwise, create a new column name from the
# concatenation of the associated date fields here.
if not "timestamp" in df.columns:
new_column_name = "timestamp"
else:
new_column_name = generate_column_name(assoc_fields)
# Create a separate df of the associated date fields. This avoids
# pandas upcasting the series dtypes on df.apply(); e.g., int to float,
# or a month 9 to 9.0, which breaks generate_timestamp()
date_df = df[ assoc_fields ]
# Now generate the timestamp from date_df and add timestamp col to df.
df = generate_timestamp_column(df, assoc_columns_dict, new_column_name)
# Determine the correct time format for the new date column, and
# convert to epoch time only if all three date components (day, month,
# year) are present; otherwise leave as a date string.
date_types = [v["date_type"] for k,v in assoc_columns_dict.items()]
if len(frozenset(date_types).intersection(MONTH_DAY_YEAR)) == 3:
time_formatter = generate_timestamp_format(assoc_columns_dict)
df.loc[:, new_column_name] = df[new_column_name].apply(lambda x: format_time(str(x), time_formatter, validate=False))
# Let SpaceTag know those date columns were renamed to a new column.
renamed_col_dict[ new_column_name] = assoc_fields
# timestamp is a protected column, so don't add to features.
if new_column_name != "timestamp":
# Handle edge case of each date field in assoc_fields qualifying
# the same column e.g. day/month/year are associated and qualify
# a field. In this case, the new_column_name
qualified_col = build_date_qualifies_field(qualified_col_dict, assoc_fields)
if qualified_col is None:
features.append(new_column_name)
else:
qualified_col_dict[qualified_col] = [new_column_name]
for geo_dict in mapper["geo"]:
kk = geo_dict["name"]
if kk in primary_geo_cols:
if geo_dict["geo_type"] == "latitude":
staple_col_name = "lat"
df.rename(columns={kk: staple_col_name}, inplace=True)
#renamed_col_dict[staple_col_name] = [kk] # 7/2/2021 do not include primary cols
elif geo_dict["geo_type"] == "longitude":
staple_col_name = "lng"
df.rename(columns={kk: staple_col_name}, inplace=True)
#renamed_col_dict[staple_col_name] = [kk] # 7/2/2021 do not include primary cols
elif geo_dict["geo_type"] == "coordinates":
c_f = geo_dict["coord_format"]
coords = df[kk].values
if c_f == "lonlat":
lats = [x for x in coords.split(",")[1]]
longs = [x for x in coords.split(",")[0]]
else:
lats = [x for x in coords.split(",")[0]]
longs = [x for x in coords.split(",")[1]]
df["lng"] = longs
df["lat"] = lats
del df[kk]
elif geo_dict["geo_type"] == GEO_TYPE_COUNTRY and kk != "country":
# force the country column to be named country
staple_col_name = "country"
df.rename(columns={kk: staple_col_name}, inplace=True)
#renamed_col_dict[staple_col_name] = [kk] # 7/2/2021 do not include primary cols
elif geo_dict["geo_type"] == GEO_TYPE_ADMIN1 and kk != "admin1":
# force the country column to be named country
staple_col_name = "admin1"
df.rename(columns={kk: staple_col_name}, inplace=True)
elif geo_dict["geo_type"] == GEO_TYPE_ADMIN2 and kk != "admin2":
# force the country column to be named country
staple_col_name = "admin2"
df.rename(columns={kk: staple_col_name}, inplace=True)
elif geo_dict["geo_type"] == GEO_TYPE_ADMIN3 and kk != "admin2":
# force the country column to be named country
staple_col_name = "admin3"
df.rename(columns={kk: staple_col_name}, inplace=True)
elif str(geo_dict["geo_type"]).lower() in ["iso2", "iso3"]:
# use the ISO2 or ISO3 column as country
# use ISO2/3 lookup dictionary to change ISO to country name.
iso_list = df[kk].unique().tolist()
dct = get_iso_country_dict(iso_list)
df.loc[:, kk] = df[kk].apply(lambda x: dct[x] if x in dct else x)
# now rename that column as "country"
staple_col_name = "country"
df.rename(columns={kk: staple_col_name}, inplace=True)
#renamed_col_dict[staple_col_name] = [kk] # 7/2/2021 do not include primary cols
elif "qualifies" in geo_dict and geo_dict["qualifies"]:
# Note that any "qualifier" column that is not primary geo/date
# will just be lopped on to the right as its own column. It'โ's
# column name will just be the name and Uncharted will deal with
# it. The key takeaway is that qualifier columns grow the width,
# not the length of the dataset.
# Want to add the qualified col as the dictionary key.
# e.g. "name": "region", "qualifies": ["probability", "color"]
# should produce two dict entries for prob and color, with region
# in a list as the value for both.
for k in geo_dict["qualifies"]:
if k in qualified_col_dict:
qualified_col_dict[k].append(kk)
else:
qualified_col_dict[k] = [kk]
else:
# only push geo columns to the named columns
# in the event there is no primary geo
# otherwise they are features and we geocode lat/lng
if len(primary_geo_cols) == 0:
if geo_dict["geo_type"] == GEO_TYPE_COUNTRY:
df["country"] = df[kk]
renamed_col_dict["country"] = [kk]
continue
if geo_dict["geo_type"] == GEO_TYPE_ADMIN1:
df["admin1"] = df[kk]
renamed_col_dict["admin1"] = [kk]
continue
if geo_dict["geo_type"] == GEO_TYPE_ADMIN2:
df["admin2"] = df[kk]
renamed_col_dict["admin2"] = [kk]
continue
if geo_dict["geo_type"] == GEO_TYPE_ADMIN3:
df["admin3"] = df[kk]
renamed_col_dict["admin3"] = [kk]
continue
features.append(kk)
# Append columns annotated in feature dict to features list (if not a
# qualifies column)
#features.extend([k["name"] for k in mapper["feature"]])
for feature_dict in mapper["feature"]:
if "qualifies" not in feature_dict or not feature_dict["qualifies"]:
features.append(feature_dict["name"])
elif "qualifies" in feature_dict and feature_dict["qualifies"]:
# Note that any "qualifier" column that is not primary geo/date
# will just be lopped on to the right as its own column. It's
# column name will just be the name and Uncharted will deal with
# it. The key takeaway is that qualifier columns grow the width,
# not the length of the dataset.
# Want to add the qualified col as the dictionary key.
# e.g. "name": "region", "qualifies": ["probability", "color"]
# should produce two dict entries for prob and color, with region
# in a list as the value for both.
for k in feature_dict["qualifies"]:
kk = feature_dict["name"]
if k in qualified_col_dict:
qualified_col_dict[k].append(kk)
else:
qualified_col_dict[k] = [kk]
# Convert aliases based on user annotations
aliases = feature_dict.get("aliases", {})
if aliases:
click.echo(f"Pre-processed aliases are: {aliases}")
type_ = df[feature_dict["name"]].dtype.type
click.echo(f"Detected column type is: {type_}")
aliases_ = {}
# The goal below is to identify the data type and then to cast the
# alias key from string into that type so that it will match
# if that fails, just cast it as a string
for kk, vv in aliases.items():
try:
if issubclass(type_, (int, np.integer)):
click.echo("Aliasing: integer detected")
aliases_[int(kk)] = vv
elif issubclass(type_, (float, np.float16, np.float32, np.float64, np.float128)):
click.echo("Aliasing: float detected")
aliases_[float(kk)] = vv
elif issubclass(type_, (bool, np.bool, np.bool_)):
click.echo("Aliasing: boolean detected")
if strtobool(kk) == 1:
aliases_[True] = vv
click.echo("Converted true string to boolean")
else:
click.echo("Converted false string to boolean")
aliases_[False] = vv
# Fall back on string
else:
click.echo("Aliasing: string detected")
aliases_[kk] = vv
except ValueError as e:
# Fall back on string
click.echo(f"Error: {e}")
aliases_[kk] = vv
click.echo(f"Aliases for {feature_dict['name']} are {aliases_}.")
df[[feature_dict["name"]]] = df[[feature_dict["name"]]].replace(aliases_)
# Since the user has decided to apply categorical aliases to this feature, we must coerce
# the entire feature to a string, even if they did not alias every value within the feature
# the reason for this is to avoid mixed types within the feature (e.g. half int/half string)
# since this makes it difficult to visualize
df[[feature_dict["name"]]] = df[[feature_dict["name"]]].astype(str)
# perform geocoding if lat/lng are present
if "lat" in df and "lng" in df:
df, df_geocode = geocode(admin, df, x="lng", y="lat", gadm=gadm, df_geocode=df_geocode)
elif "country" in primary_geo_types or ("country" in df and not primary_geo_types):
# Correct any misspellings etc. in state and admin areas when not
# geocoding lat and lng above, and country is the primary_geo.
# This doesn't match names if iso2/iso3 are primary, and when country
# admin1-3 are moved to features. Exception is when country is present,
# but nothing is marked as primary.
# Only geo_code resolve_to_gadm = True fields.
# Used below when match_geocode_names
resolve_to_gadm_geotypes = [k["geo_type"] for k in mapper["geo"] if "resolve_to_gadm" in k and k["resolve_to_gadm"] == True]
if resolve_to_gadm_geotypes:
df = match_geo_names(admin, df, resolve_to_gadm_geotypes)
df_geo_cols = [i for i in df.columns if 'mixmasta_geocoded' in i]
for c in df_geo_cols:
df.rename(columns={c: c.replace('_mixmasta_geocoded','')}, inplace=True)
# protected_cols are the required_cols present in the submitted dataframe.
protected_cols = list(set(required_cols) & set(df.columns))
# if a field qualifies a protected field like country, it should have data
# in each row, unlike features below where the qualifying data appears
# only on those rows.
# k: qualified column (str)
# v: list of columns (str) that qualify k
for k,v in qualified_col_dict.items():
if k in protected_cols:
# k is qualified by the columns in v, and k is a protected column,
# so extend the width of the output dataset with v for each row.
protected_cols.extend(v)
col_order.extend(v)
# Prepare output by
# 1. if there are no features, simply reduce the dataframe.
# or, 2.iterating features to add to feature adn value columns.
if not features:
df_out = df[protected_cols]
else:
df_out = pd.DataFrame()
for feat in features:
using_cols = protected_cols.copy()
if feat in qualified_col_dict:
# dict value is a list, so extend.
using_cols.extend(qualified_col_dict[feat])
# add a qualifying column name only if not in col_order already
for c in qualified_col_dict[feat]:
if c not in col_order:
col_order.append(c)
join_overlap = False
try:
df_ = df[using_cols + [feat+'_mixmasta_left']].copy()
join_overlap = True
except:
df_ = df[using_cols + [feat]].copy()
try:
if mapper[feat]["new_col_name"] == None:
df_["feature"] = feat
else:
df_["feature"] = mapper[feat]["new_col_name"]
except:
df_["feature"] = feat
if join_overlap:
df_.rename(columns={f"{feat}_mixmasta_left": "value"}, inplace=True)
else:
df_.rename(columns={feat: "value"}, inplace=True)
# Add feature/value for epochtime as object adds it without decimal
# places, but it is still saved as a double in the parquet file.
if len(df_out) == 0:
if feat in other_time_cols:
df_out = df_.astype({'value': object})
else:
df_out = df_
else:
if feat in other_time_cols:
df_out = df_out.append(df_.astype({'value': object}))
else:
df_out = df_out.append(df_)
for c in col_order:
if c not in df_out:
df_out[c] = None
# Drop rows with nulls in value column.
df_out.dropna(axis=0, subset=['value'], inplace=True)
# Handle any renamed cols being renamed.
renamed_col_dict = audit_renamed_col_dict(renamed_col_dict)
click.echo("Processed dataframe:")
click.echo(df_out.head())
return df_out[col_order], renamed_col_dict, df_geocode
def optimize_df_types(df: pd.DataFrame):
"""
Pandas will upcast essentially everything. This will use the built-in
Pandas function to_numeeric to downcast dataframe series to types that use
less memory e.g. float64 to float32.
For very large dataframes the memory reduction should translate into
increased efficieny.
"""
floats = df.select_dtypes(include=['float64']).columns.tolist()
df[floats] = df[floats].apply(pd.to_numeric, downcast='float')
ints = df.select_dtypes(include=['int64']).columns.tolist()
df[ints] = df[ints].apply(pd.to_numeric, downcast='integer')
#for col in df.select_dtypes(include=['object']):
# num_unique_values = len(df[col].unique())
# num_total_values = len(df[col])
# if float(num_unique_values) / num_total_values < 0.5:
# df[col] = df[col].astype('category')
return df
def process(fp: str, mp: str, admin: str, output_file: str, write_output = True, gadm=None):
"""
Parameters
----------
mp: str
Filename for JSON mapper from spacetag.
Schema: https://github.com/jataware/spacetag/blob/schema/schema.py
Example: https://github.com/jataware/spacetag/blob/schema/example.json
gadm: gpd.GeoDataFrame, default None
optional specification of a GeoDataFrame of GADM shapes of the appropriate
level (admin2/3) for geocoding
"""
# Read JSON schema to be mapper.
mapper = dict
with open(mp) as f:
mapper = json.loads(f.read())
# Validate JSON mapper schema against SpaceTag schema.py model.
#model = SpaceModel(geo=mapper['geo'], date=mapper['date'], feature=mapper['feature'], meta=mapper['meta'])
# "meta" portion of schema specifies transformation type
transform = mapper["meta"]
# Check transform for meta.geocode_level. Update admin to this if present.
if (admin == None and "geocode_level" in transform):
admin = transform["geocode_level"]
ftype = transform["ftype"]
if ftype == "geotiff":
if transform["date"] == "":
d = None
else:
d = transform["date"]
df = raster2df(
InRaster = fp,
feature_name = transform["feature_name"],
band = int(transform["band"] if "band" in transform and transform["band"] != "" else "0"),
nodataval = int(transform["null_val"]),
date = d,
band_name = transform["band_name"],
bands = transform["bands"] if "bands" in transform else None
)
elif ftype == 'excel':
df = | pd.read_excel(fp, transform['sheet']) | pandas.read_excel |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
# calc WYT and 8RI. add columns to datafile from cdec_scraper.
# confirm against http://cdec.water.ca.gov/cgi-progs/iodir/WSIHIST
cfsd_mafd = 2.29568411*10**-5 * 86400 / 10 ** 6
water_year = lambda d: d.year+1 if d.dayofyear >= 274 else d.year
winter = lambda y: (y.index.month >= 10) | (y.index.month <= 3)
summer = lambda y: (y.index.month >= 4) & (y.index.month <= 7)
SR_pts = ['BND_fnf', 'ORO_fnf', 'YRS_fnf', 'FOL_fnf']
SJR_pts = ['NML_fnf', 'TLG_fnf', 'MRC_fnf', 'MIL_fnf']
# don't change this data
df = | pd.read_csv('cord-data.csv', index_col=0, parse_dates=True) | pandas.read_csv |
# based on https://github.com/EntilZha/BLINK from EntilZha
import sys
# blink is in ..
sys.path.append(".")
# +
import json
import numpy as np
import torch
import uvicorn
from colorama import init
from fastapi import FastAPI
from termcolor import colored
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
import blink.candidate_ranking.utils as utils
import blink.ner as NER
from blink.biencoder.biencoder import BiEncoderRanker, load_biencoder
from blink.biencoder.data_process import (get_candidate_representation,
process_mention_data)
from blink.crossencoder.crossencoder import (CrossEncoderRanker,
load_crossencoder)
from blink.crossencoder.data_process import prepare_crossencoder_data
from blink.crossencoder.train_cross import evaluate, modify
from blink.indexer.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer
from pydantic import BaseModel
# -
from addict import Dict
from typing import List
# for nil models
import pickle
# for jaccard
import textdistance
#
import numpy as np
import pandas as pd
# patch for running fastapi in jupyter
import nest_asyncio
# +
class Item(BaseModel):
text: str
class Sample(BaseModel):
label:str
label_id:int
context_left: str
context_right:str
mention: str
start_pos:int
end_pos: int
sent_idx:int
#text: str
HIGHLIGHTS = [
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
]
def _print_colorful_text(input_sentence, samples):
init() # colorful output
msg = ""
if samples and (len(samples) > 0):
msg += input_sentence[0 : int(samples[0]["start_pos"])]
for idx, sample in enumerate(samples):
msg += colored(
input_sentence[int(sample["start_pos"]) : int(sample["end_pos"])],
"grey",
HIGHLIGHTS[idx % len(HIGHLIGHTS)],
)
if idx < len(samples) - 1:
msg += input_sentence[
int(sample["end_pos"]) : int(samples[idx + 1]["start_pos"])
]
else:
msg += input_sentence[int(sample["end_pos"]) :]
else:
msg = input_sentence
print("\n" + str(msg) + "\n")
def _print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, show_url=False
):
print(colored(sample["mention"], "grey", HIGHLIGHTS[idx % len(HIGHLIGHTS)]))
to_print = "id:{}\ntitle:{}\ntext:{}\n".format(e_id, e_title, e_text[:256])
if show_url:
to_print += "url:{}\n".format(e_url)
print(to_print)
def _annotate(ner_model, input_sentences):
ner_output_data = ner_model.predict(input_sentences)
sentences = ner_output_data["sentences"]
mentions = ner_output_data["mentions"]
samples = []
for mention in mentions:
record = {}
record["label"] = "unknown"
record["label_id"] = -1
# LOWERCASE EVERYTHING !
record["context_left"] = sentences[mention["sent_idx"]][
: mention["start_pos"]
].lower()
record["context_right"] = sentences[mention["sent_idx"]][
mention["end_pos"] :
].lower()
record["mention"] = mention["text"].lower()
record["start_pos"] = int(mention["start_pos"])
record["end_pos"] = int(mention["end_pos"])
record["sent_idx"] = mention["sent_idx"]
samples.append(record)
return samples
def _load_candidates(
entity_catalogue, entity_encoding, faiss_index=None, index_path=None, logger=None
):
# only load candidate encoding if not using faiss index
if faiss_index is None:
candidate_encoding = torch.load(entity_encoding)
indexer = None
else:
if logger:
logger.info("Using faiss index to retrieve entities.")
candidate_encoding = None
assert index_path is not None, "Error! Empty indexer path."
if faiss_index == "flat":
indexer = DenseFlatIndexer(1)
elif faiss_index == "hnsw":
indexer = DenseHNSWFlatIndexer(1)
else:
raise ValueError("Error! Unsupported indexer type! Choose from flat,hnsw.")
indexer.deserialize_from(index_path)
# load all the 5903527 entities
title2id = {}
id2title = {}
id2text = {}
wikipedia_id2local_id = {}
local_idx = 0
with open(entity_catalogue, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
if "idx" in entity:
split = entity["idx"].split("curid=")
if len(split) > 1:
wikipedia_id = int(split[-1].strip())
else:
wikipedia_id = entity["idx"].strip()
assert wikipedia_id not in wikipedia_id2local_id
wikipedia_id2local_id[wikipedia_id] = local_idx
title2id[entity["title"]] = local_idx
id2title[local_idx] = entity["title"]
id2text[local_idx] = entity["text"]
local_idx += 1
return (
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
indexer,
)
def __map_test_entities(test_entities_path, title2id, logger):
# load the 732859 tac_kbp_ref_know_base entities
kb2id = {}
missing_pages = 0
n = 0
with open(test_entities_path, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
if entity["title"] not in title2id:
missing_pages += 1
else:
kb2id[entity["entity_id"]] = title2id[entity["title"]]
n += 1
if logger:
logger.info("missing {}/{} pages".format(missing_pages, n))
return kb2id
def __load_test(test_filename, kb2id, wikipedia_id2local_id, logger):
test_samples = []
with open(test_filename, "r") as fin:
lines = fin.readlines()
for line in lines:
record = json.loads(line)
record["label"] = str(record["label_id"])
# for tac kbp we should use a separate knowledge source to get the entity id (label_id)
if kb2id and len(kb2id) > 0:
if record["label"] in kb2id:
record["label_id"] = kb2id[record["label"]]
else:
continue
# check that each entity id (label_id) is in the entity collection
elif wikipedia_id2local_id and len(wikipedia_id2local_id) > 0:
try:
key = int(record["label"].strip())
if key in wikipedia_id2local_id:
record["label_id"] = wikipedia_id2local_id[key]
else:
continue
except:
continue
# LOWERCASE EVERYTHING !
record["context_left"] = record["context_left"].lower()
record["context_right"] = record["context_right"].lower()
record["mention"] = record["mention"].lower()
test_samples.append(record)
if logger:
logger.info("{}/{} samples considered".format(len(test_samples), len(lines)))
return test_samples
def _get_test_samples(
test_filename, test_entities_path, title2id, wikipedia_id2local_id, logger
):
kb2id = None
if test_entities_path:
kb2id = __map_test_entities(test_entities_path, title2id, logger)
test_samples = __load_test(test_filename, kb2id, wikipedia_id2local_id, logger)
return test_samples
def _process_biencoder_dataloader(samples, tokenizer, biencoder_params):
_, tensor_data = process_mention_data(
samples,
tokenizer,
biencoder_params["max_context_length"],
biencoder_params["max_cand_length"],
silent=True,
logger=None,
debug=biencoder_params["debug"],
)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=biencoder_params["eval_batch_size"]
)
return dataloader
def _run_biencoder(biencoder, dataloader, candidate_encoding, top_k=100, indexer=None):
biencoder.model.eval()
labels = []
nns = []
all_scores = []
for batch in tqdm(dataloader):
context_input, _, label_ids = batch
with torch.no_grad():
if indexer is not None:
context_encoding = biencoder.encode_context(context_input).numpy()
context_encoding = np.ascontiguousarray(context_encoding)
scores, indicies = indexer.search_knn(context_encoding, top_k)
else:
scores = biencoder.score_candidate(
context_input, None, cand_encs=candidate_encoding # .to(device)
)
scores, indicies = scores.topk(top_k)
scores = scores.data.numpy()
indicies = indicies.data.numpy()
labels.extend(label_ids.data.numpy())
nns.extend(indicies)
all_scores.extend(scores)
return labels, nns, all_scores
def _process_crossencoder_dataloader(context_input, label_input, crossencoder_params):
tensor_data = TensorDataset(context_input, label_input)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=crossencoder_params["eval_batch_size"]
)
return dataloader
def _run_crossencoder(crossencoder, dataloader, logger, context_len, device="cuda"):
crossencoder.model.eval()
accuracy = 0.0
crossencoder.to(device)
res = evaluate(crossencoder, dataloader, device, logger, context_len, silent=False)
accuracy = res["normalized_accuracy"]
logits = res["logits"]
predictions = np.argsort(logits, axis=1)
return accuracy, predictions, logits
def load_models(args, logger=None):
# load biencoder model
if logger:
logger.info("loading biencoder model")
with open(args.biencoder_config) as json_file:
biencoder_params = json.load(json_file)
biencoder_params["path_to_model"] = args.biencoder_model
biencoder = load_biencoder(biencoder_params)
crossencoder = None
crossencoder_params = None
if not args.fast:
# load crossencoder model
if logger:
logger.info("loading crossencoder model")
with open(args.crossencoder_config) as json_file:
crossencoder_params = json.load(json_file)
crossencoder_params["path_to_model"] = args.crossencoder_model
crossencoder = load_crossencoder(crossencoder_params)
# load candidate entities
if logger:
logger.info("loading candidate entities")
(
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer,
) = _load_candidates(
args.entity_catalogue,
args.entity_encoding,
faiss_index=args.faiss_index,
index_path=args.index_path,
logger=logger,
)
id2url = {
v: "https://en.wikipedia.org/wiki?curid=%s" % k
for k, v in wikipedia_id2local_id.items()
}
return (
biencoder,
biencoder_params,
crossencoder,
crossencoder_params,
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer,
id2url
)
def link_text(text):
# Identify mentions
samples = _annotate(ner_model, [text])
_print_colorful_text(text, samples)
if len(samples) == 0:
return []
else:
return link_samples(samples, models, nil_bi_models, nil_models, logger)
def link_samples(samples, models, nil_bi_models=None, nil_models=None, logger=None):
biencoder = models[0]
biencoder_params = models[1]
candidate_encoding = models[4]
faiss_indexer = models[9]
id2title = models[6]
id2text = models[7]
id2url = models[10]
# don't look at labels
keep_all = True
if logger:
# prepare the data for biencoder
logger.info("preparing data for biencoder")
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params
)
if logger:
# run biencoder
logger.info("run biencoder")
top_k = args.top_k
labels, nns, scores = _run_biencoder(
biencoder,
dataloader,
candidate_encoding,
top_k,
faiss_indexer,
)
# nil prediction
if nil_bi_models:
nil_bi_model = nil_bi_models[0]
nil_bi_features = nil_bi_models[1]
nil_X = | pd.DataFrame() | pandas.DataFrame |
"""
This file imports the database and does a series of recursive histograms, where the data can be binned as a function
of one or more variables.
For instance, if the peclet numbers for the experiments range from 90 to 1e6, but I want to know which experiments
within that range result in an exponential profile, then I have to do this.
"""
import argparse
import os
import math
import numpy as np
import pandas as pd
from helper_functions import (bin_to_rp_shape, make_dirs)
# Default database
TRAINING_PATH = os.path.join('output', 'data', 'training_data.csv')
TARGET_PATH = os.path.join('output', 'data', 'target_data.csv')
_CATEGORICAL_FEATURES_TO_DROP = [
u'collector_coating=FeOOH', u'collector_coating=IronOxide',
u'collector_coating=None', u'enm_id=Ag', u'enm_id=C60',
u'enm_id=CeO2', u'enm_id=CuO', u'enm_id=Fe', u'enm_id=MWCNT',
u'enm_id=QD', u'enm_id=TiO2', u'enm_id=ZnO', u'enm_id=nBiochar',
u'enm_id=nHAP', u'nom_id=Alg', u'nom_id=Citric', u'nom_id=FA',
u'nom_id=Formic', u'nom_id=HA', u'nom_id=None', u'nom_id=Oxalic',
u'nom_id=SRHA', u'nom_id=TRIZMA']
_PARAMETER_BIN_SIZE_DICT = {
'n_Lo': 13,
'm_inf': 26,
'n_asp': 4,
'n_z1': 10,
'n_z2': 11,
'n_asp_c': 9,
'influent_concentration_enm': 11,
'concentration_nom': 10,
'n_att': 7,
'n_por': 10,
'n_g': 9,
'n_Pe': 8,
'n_dl': 7
}
_PARAMETER_BIN_SPACE_DICT = {
'n_Lo': 'linear',
'm_inf': 'linear',
'n_asp': 'log',
'n_z1': 'linear',
'n_z2': 'linear',
'n_asp_c': 'linear',
'influent_concentration_enm': 'linear',
'concentration_nom': 'linear',
'n_att': 'log',
'n_por': 'linear',
'n_g': 'log',
'n_Pe': 'log',
'n_dl': 'log'
}
def main(output_dir='output', training_path=TRAINING_PATH, target_path=TARGET_PATH):
"""Generate histograms."""
target_data = | pd.read_csv(target_path) | pandas.read_csv |
from __future__ import absolute_import, print_function, division
import scipy.sparse as sps
import os.path as op
import pandas as pd
import numpy as np
import pandas
import h5py
from cooler import api
import pytest
testdir = op.realpath(op.dirname(__file__))
datadir = op.join(testdir, "data")
def test_info(mock_cooler):
info = api.info(mock_cooler)
assert isinstance(info, dict)
def test_chromtable(mock_cooler):
table = api.chroms(mock_cooler)
assert np.all(table["length"] == mock_cooler["chroms"]["length"])
def test_bintable(mock_cooler):
chromID_lookup = pd.Series({"chr1": 0, "chr2": 1})
lo, hi = 2, 10
table = api.bins(mock_cooler, lo, hi)
assert np.all(chromID_lookup[table["chrom"]] == mock_cooler["bins"]["chrom"][lo:hi])
assert np.all(table["start"] == mock_cooler["bins"]["start"][lo:hi])
assert np.all(table["end"] == mock_cooler["bins"]["end"][lo:hi])
table = api.bins(mock_cooler, lo, hi, fields=["start", "end"])
assert np.all(table["start"] == mock_cooler["bins"]["start"][lo:hi])
assert np.all(table["end"] == mock_cooler["bins"]["end"][lo:hi])
def test_bintable_many_contigs():
# In a file with many contigs, bins/chrom does not have an ENUM header,
# so chromosome names are taken from the chroms/name
clr = api.Cooler(op.join(datadir, "manycontigs.1.cool"))
bins = clr.bins()[:10]
assert pd.api.types.is_categorical_dtype(bins["chrom"].dtype)
bins = clr.bins()[['chrom', 'start']][:10]
assert | pd.api.types.is_categorical_dtype(bins["chrom"].dtype) | pandas.api.types.is_categorical_dtype |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
This fixture does not include methods - 'time', 'index', 'nearest',
'values' as a parameterization
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest", downcast="infer")
tm.assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="zero", downcast="infer")
tm.assert_series_equal(result, expected)
# quadratic
# GH #15662.
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="quadratic", downcast="infer")
tm.assert_series_equal(result, expected)
# cubic
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
result = s.interpolate(method="cubic")
tm.assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
s.interpolate(limit=limit, method=method, **kwargs)
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])
msg = f"method must be one of.* Got '{invalid_method}' instead"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)
# When an invalid method and invalid limit (such as -1) are
# provided, the error message reflects the invalid method.
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method, limit=-1)
def test_interp_invalid_method_and_value(self):
# GH#36624
ser = Series([1, 3, np.nan, 12, np.nan, 25])
msg = "Cannot pass both fill_value and method"
with pytest.raises(ValueError, match=msg):
ser.interpolate(fill_value=3, method="pad")
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
tm.assert_series_equal(result, expected)
def test_interp_unlimited(self):
# these test are for issue #16282 default Limit=None is unlimited
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
result = s.interpolate(method="linear", limit_direction="backward")
tm.assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
r"Invalid limit_direction: expecting one of \['forward', "
r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
# raises an error even if no limit is specified.
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_direction="abc")
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit_area="inside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="inside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
result = s.interpolate(method="linear", limit_area="outside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="backward"
)
tm.assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_area="abc")
@pytest.mark.parametrize(
"method, limit_direction, expected",
[
("pad", "backward", "forward"),
("ffill", "backward", "forward"),
("backfill", "forward", "backward"),
("bfill", "forward", "backward"),
("pad", "both", "forward"),
("ffill", "both", "forward"),
("backfill", "both", "backward"),
("bfill", "both", "backward"),
],
)
def test_interp_limit_direction_raises(self, method, limit_direction, expected):
# https://github.com/pandas-dev/pandas/pull/34746
s = Series([1, 2, 3])
msg = f"`limit_direction` must be '{expected}' for method `{method}`"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method, limit_direction=limit_direction)
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = | Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan]) | pandas.Series |
import pandas as pd
import textacy
from dask import dataframe as dd
from dask.multiprocessing import get
from multiprocessing import cpu_count
import textacy
import pandas as pd
import en_core_web_sm
nlp = en_core_web_sm.load()
def dask_df_apply(df, text_col, textacy_col_name='textacy_doc', ncores=None, inplace=False):
"""
Use dask to parallelize apply textacy Doc object creation from a dataframe
Parameters
----------
df : DataFrame
Dataframe which holds the text
text_col : str
The name of the text column in the df
textacy_col_name : str
The name to give to the column with the textacy doc objects
ncores : int
Number of cores to use for multiprocessing. Defaults to all cores in cpu minus one.
inplace : bool
Whether to return the entire df with the textacy doc series concatenated
or only textacy doc series.
Default is False
Returns
-------
DataFrame / Series
Either the dataframe passed as arg with the textacy series as last column or
just the textacy column
"""
# If no number of cores to work with, default to max
if not ncores:
nCores = cpu_count() - 1
nCores
# Partition dask dataframe and map textacy doc apply
# Sometimes this fails because it can't infer the dtypes correctly
# meta=pd.Series(name=0, dtype='object') is a start
# This is also a start https://stackoverflow.com/questions/40019905/how-to-map-a-column-with-dask?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# Possibly both the inner lambda apply and outer lambda df both need metadata?
textacy_series = dd.from_pandas(df, npartitions=nCores).map_partitions(
lambda df : df[text_col].apply(lambda x : textacy.doc.Doc(x, lang=nlp))).compute(get=get)
# Name the series
textacy_series.name = textacy_col_name
# If inplace return the dataframe and textacy Series
if inplace:
return | pd.concat([df, textacy_series], axis=1) | pandas.concat |
"""Custom pandas accessors for generic data.
Methods can be accessed as follows:
* `GenericSRAccessor` -> `pd.Series.vbt.*`
* `GenericDFAccessor` -> `pd.DataFrame.vbt.*`
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> # vectorbt.generic.accessors.GenericAccessor.rolling_mean
>>> pd.Series([1, 2, 3, 4]).vbt.rolling_mean(2)
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
```
The accessors inherit `vectorbt.base.accessors` and are inherited by more
specialized accessors, such as `vectorbt.signals.accessors` and `vectorbt.returns.accessors`.
!!! note
Grouping is only supported by the methods that accept the `group_by` argument.
Accessors do not utilize caching.
Run for the examples below:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime, timedelta
>>> df = pd.DataFrame({
... 'a': [1, 2, 3, 4, 5],
... 'b': [5, 4, 3, 2, 1],
... 'c': [1, 2, 3, 2, 1]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]))
>>> df
a b c
2020-01-01 1 5 1
2020-01-02 2 4 2
2020-01-03 3 3 3
2020-01-04 4 2 2
2020-01-05 5 1 1
>>> index = [datetime(2020, 1, 1) + timedelta(days=i) for i in range(10)]
>>> sr = pd.Series(np.arange(len(index)), index=index)
>>> sr
2020-01-01 0
2020-01-02 1
2020-01-03 2
2020-01-04 3
2020-01-05 4
2020-01-06 5
2020-01-07 6
2020-01-08 7
2020-01-09 8
2020-01-10 9
dtype: int64
```
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `GenericAccessor.metrics`.
```python-repl
>>> df2 = pd.DataFrame({
... 'a': [np.nan, 2, 3],
... 'b': [4, np.nan, 5],
... 'c': [6, 7, np.nan]
... }, index=['x', 'y', 'z'])
>>> df2.vbt(freq='d').stats(column='a')
Start x
End z
Period 3 days 00:00:00
Count 2
Mean 2.5
Std 0.707107
Min 2.0
Median 2.5
Max 3.0
Min Index y
Max Index z
Name: a, dtype: object
```
### Mapping
Mapping can be set both in `GenericAccessor` (preferred) and `GenericAccessor.stats`:
```python-repl
>>> mapping = {x: 'test_' + str(x) for x in pd.unique(df2.values.flatten())}
>>> df2.vbt(freq='d', mapping=mapping).stats(column='a')
Start x
End z
Period 3 days 00:00:00
Count 2
Value Counts: test_2.0 1
Value Counts: test_3.0 1
Value Counts: test_4.0 0
Value Counts: test_5.0 0
Value Counts: test_6.0 0
Value Counts: test_7.0 0
Value Counts: test_nan 1
Name: a, dtype: object
>>> df2.vbt(freq='d').stats(column='a', settings=dict(mapping=mapping))
UserWarning: Changing the mapping will create a copy of this object.
Consider setting it upon object creation to re-use existing cache.
Start x
End z
Period 3 days 00:00:00
Count 2
Value Counts: test_2.0 1
Value Counts: test_3.0 1
Value Counts: test_4.0 0
Value Counts: test_5.0 0
Value Counts: test_6.0 0
Value Counts: test_7.0 0
Value Counts: test_nan 1
Name: a, dtype: object
```
Selecting a column before calling `stats` will consider uniques from this column only:
```python-repl
>>> df2['a'].vbt(freq='d', mapping=mapping).stats()
Start x
End z
Period 3 days 00:00:00
Count 2
Value Counts: test_2.0 1
Value Counts: test_3.0 1
Value Counts: test_nan 1
Name: a, dtype: object
```
To include all keys from `mapping`, pass `incl_all_keys=True`:
>>> df2['a'].vbt(freq='d', mapping=mapping).stats(settings=dict(incl_all_keys=True))
Start x
End z
Period 3 days 00:00:00
Count 2
Value Counts: test_2.0 1
Value Counts: test_3.0 1
Value Counts: test_4.0 0
Value Counts: test_5.0 0
Value Counts: test_6.0 0
Value Counts: test_7.0 0
Value Counts: test_nan 1
Name: a, dtype: object
```
`GenericAccessor.stats` also supports (re-)grouping:
```python-repl
>>> df2.vbt(freq='d').stats(column=0, group_by=[0, 0, 1])
Start x
End z
Period 3 days 00:00:00
Count 4
Mean 3.5
Std 1.290994
Min 2.0
Median 3.5
Max 5.0
Min Index y
Max Index z
Name: 0, dtype: object
```
"""
import numpy as np
import pandas as pd
from scipy import stats
from numba.typed import Dict
import warnings
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import (
Binarizer,
MinMaxScaler,
MaxAbsScaler,
Normalizer,
RobustScaler,
StandardScaler,
QuantileTransformer,
PowerTransformer
)
from vectorbt import _typing as tp
from vectorbt.utils import checks
from vectorbt.utils.config import Config, merge_dicts, resolve_dict
from vectorbt.utils.figure import make_figure, make_subplots
from vectorbt.utils.mapping import apply_mapping, to_mapping
from vectorbt.base import index_fns, reshape_fns
from vectorbt.base.accessors import BaseAccessor, BaseDFAccessor, BaseSRAccessor
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic import plotting, nb
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.generic.splitters import SplitterT, RangeSplitter, RollingSplitter, ExpandingSplitter
from vectorbt.generic.stats_builder import StatsBuilderMixin
from vectorbt.generic.decorators import add_nb_methods, add_transform_methods
from vectorbt.records.mapped_array import MappedArray
try: # pragma: no cover
import bottleneck as bn
nanmean = bn.nanmean
nanstd = bn.nanstd
nansum = bn.nansum
nanmax = bn.nanmax
nanmin = bn.nanmin
nanmedian = bn.nanmedian
nanargmax = bn.nanargmax
nanargmin = bn.nanargmin
except ImportError:
# slower numpy
nanmean = np.nanmean
nanstd = np.nanstd
nansum = np.nansum
nanmax = np.nanmax
nanmin = np.nanmin
nanmedian = np.nanmedian
nanargmax = np.nanargmax
nanargmin = np.nanargmin
GenericAccessorT = tp.TypeVar("GenericAccessorT", bound="GenericAccessor")
SplitOutputT = tp.Union[tp.MaybeTuple[tp.Tuple[tp.Frame, tp.Index]], tp.BaseFigure]
class TransformerT(tp.Protocol):
def __init__(self, **kwargs) -> None:
...
def transform(self, *args, **kwargs) -> tp.Array2d:
...
def fit_transform(self, *args, **kwargs) -> tp.Array2d:
...
__pdoc__ = {}
nb_config = Config(
{
'shuffle': dict(func=nb.shuffle_nb, path='vectorbt.generic.nb.shuffle_nb'),
'fillna': dict(func=nb.fillna_nb, path='vectorbt.generic.nb.fillna_nb'),
'bshift': dict(func=nb.bshift_nb, path='vectorbt.generic.nb.bshift_nb'),
'fshift': dict(func=nb.fshift_nb, path='vectorbt.generic.nb.fshift_nb'),
'diff': dict(func=nb.diff_nb, path='vectorbt.generic.nb.diff_nb'),
'pct_change': dict(func=nb.pct_change_nb, path='vectorbt.generic.nb.pct_change_nb'),
'ffill': dict(func=nb.ffill_nb, path='vectorbt.generic.nb.ffill_nb'),
'cumsum': dict(func=nb.nancumsum_nb, path='vectorbt.generic.nb.nancumsum_nb'),
'cumprod': dict(func=nb.nancumprod_nb, path='vectorbt.generic.nb.nancumprod_nb'),
'rolling_min': dict(func=nb.rolling_min_nb, path='vectorbt.generic.nb.rolling_min_nb'),
'rolling_max': dict(func=nb.rolling_max_nb, path='vectorbt.generic.nb.rolling_max_nb'),
'rolling_mean': dict(func=nb.rolling_mean_nb, path='vectorbt.generic.nb.rolling_mean_nb'),
'expanding_min': dict(func=nb.expanding_min_nb, path='vectorbt.generic.nb.expanding_min_nb'),
'expanding_max': dict(func=nb.expanding_max_nb, path='vectorbt.generic.nb.expanding_max_nb'),
'expanding_mean': dict(func=nb.expanding_mean_nb, path='vectorbt.generic.nb.expanding_mean_nb'),
'product': dict(func=nb.nanprod_nb, is_reducing=True, path='vectorbt.generic.nb.nanprod_nb')
},
as_attrs=False,
readonly=True
)
"""_"""
__pdoc__['nb_config'] = f"""Config of Numba methods to be added to `GenericAccessor`.
```json
{nb_config.to_doc()}
```
"""
transform_config = Config(
{
'binarize': dict(
transformer=Binarizer,
docstring="See `sklearn.preprocessing.Binarizer`."
),
'minmax_scale': dict(
transformer=MinMaxScaler,
docstring="See `sklearn.preprocessing.MinMaxScaler`."
),
'maxabs_scale': dict(
transformer=MaxAbsScaler,
docstring="See `sklearn.preprocessing.MaxAbsScaler`."
),
'normalize': dict(
transformer=Normalizer,
docstring="See `sklearn.preprocessing.Normalizer`."
),
'robust_scale': dict(
transformer=RobustScaler,
docstring="See `sklearn.preprocessing.RobustScaler`."
),
'scale': dict(
transformer=StandardScaler,
docstring="See `sklearn.preprocessing.StandardScaler`."
),
'quantile_transform': dict(
transformer=QuantileTransformer,
docstring="See `sklearn.preprocessing.QuantileTransformer`."
),
'power_transform': dict(
transformer=PowerTransformer,
docstring="See `sklearn.preprocessing.PowerTransformer`."
)
},
as_attrs=False,
readonly=True
)
"""_"""
__pdoc__['transform_config'] = f"""Config of transform methods to be added to `GenericAccessor`.
```json
{transform_config.to_doc()}
```
"""
@add_nb_methods(nb_config)
@add_transform_methods(transform_config)
class GenericAccessor(BaseAccessor, StatsBuilderMixin):
"""Accessor on top of data of any type. For both, Series and DataFrames.
Accessible through `pd.Series.vbt` and `pd.DataFrame.vbt`."""
def __init__(self, obj: tp.SeriesFrame, mapping: tp.Optional[tp.MappingLike] = None, **kwargs) -> None:
if mapping is not None:
mapping = to_mapping(mapping)
self._mapping = mapping
BaseAccessor.__init__(self, obj, mapping=mapping, **kwargs)
StatsBuilderMixin.__init__(self)
@property
def sr_accessor_cls(self):
"""Accessor class for `pd.Series`."""
return GenericSRAccessor
@property
def df_accessor_cls(self):
"""Accessor class for `pd.DataFrame`."""
return GenericDFAccessor
@property
def mapping(self) -> tp.Optional[tp.Mapping]:
"""Mapping."""
return self._mapping
def apply_mapping(self, **kwargs) -> tp.SeriesFrame:
"""See `vectorbt.utils.mapping.apply_mapping`."""
return apply_mapping(self.obj, self.mapping, **kwargs)
def rolling_std(self, window: int, minp: tp.Optional[int] = None, ddof: int = 1,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.rolling_std_nb`."""
out = nb.rolling_std_nb(self.to_2d_array(), window, minp=minp, ddof=ddof)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def expanding_std(self, minp: tp.Optional[int] = 1, ddof: int = 1,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.expanding_std_nb`."""
out = nb.expanding_std_nb(self.to_2d_array(), minp=minp, ddof=ddof)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def ewm_mean(self, span: int, minp: tp.Optional[int] = 0, adjust: bool = True,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.ewm_mean_nb`."""
out = nb.ewm_mean_nb(self.to_2d_array(), span, minp=minp, adjust=adjust)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def ewm_std(self, span: int, minp: tp.Optional[int] = 0, adjust: bool = True, ddof: int = 1,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.ewm_std_nb`."""
out = nb.ewm_std_nb(self.to_2d_array(), span, minp=minp, adjust=adjust, ddof=ddof)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def apply_along_axis(self, apply_func_nb: tp.Union[tp.ApplyFunc, tp.RowApplyFunc], *args, axis: int = 0,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Apply a function `apply_func_nb` along an axis."""
checks.assert_numba_func(apply_func_nb)
if axis == 0:
out = nb.apply_nb(self.to_2d_array(), apply_func_nb, *args)
elif axis == 1:
out = nb.row_apply_nb(self.to_2d_array(), apply_func_nb, *args)
else:
raise ValueError("Only axes 0 and 1 are supported")
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def rolling_apply(self, window: int, apply_func_nb: tp.Union[tp.RollApplyFunc, nb.tp.RollMatrixApplyFunc],
*args, minp: tp.Optional[int] = None, on_matrix: bool = False,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.rolling_apply_nb` and
`vectorbt.generic.nb.rolling_matrix_apply_nb` for `on_matrix=True`.
## Example
```python-repl
>>> mean_nb = njit(lambda i, col, a: np.nanmean(a))
>>> df.vbt.rolling_apply(3, mean_nb)
a b c
2020-01-01 1.0 5.0 1.000000
2020-01-02 1.5 4.5 1.500000
2020-01-03 2.0 4.0 2.000000
2020-01-04 3.0 3.0 2.333333
2020-01-05 4.0 2.0 2.000000
>>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
>>> df.vbt.rolling_apply(3, mean_matrix_nb, on_matrix=True)
a b c
2020-01-01 2.333333 2.333333 2.333333
2020-01-02 2.500000 2.500000 2.500000
2020-01-03 2.666667 2.666667 2.666667
2020-01-04 2.777778 2.777778 2.777778
2020-01-05 2.666667 2.666667 2.666667
```
"""
checks.assert_numba_func(apply_func_nb)
if on_matrix:
out = nb.rolling_matrix_apply_nb(self.to_2d_array(), window, minp, apply_func_nb, *args)
else:
out = nb.rolling_apply_nb(self.to_2d_array(), window, minp, apply_func_nb, *args)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def expanding_apply(self, apply_func_nb: tp.Union[tp.RollApplyFunc, nb.tp.RollMatrixApplyFunc],
*args, minp: tp.Optional[int] = 1, on_matrix: bool = False,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.expanding_apply_nb` and
`vectorbt.generic.nb.expanding_matrix_apply_nb` for `on_matrix=True`.
## Example
```python-repl
>>> mean_nb = njit(lambda i, col, a: np.nanmean(a))
>>> df.vbt.expanding_apply(mean_nb)
a b c
2020-01-01 1.0 5.0 1.0
2020-01-02 1.5 4.5 1.5
2020-01-03 2.0 4.0 2.0
2020-01-04 2.5 3.5 2.0
2020-01-05 3.0 3.0 1.8
>>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
>>> df.vbt.expanding_apply(mean_matrix_nb, on_matrix=True)
a b c
2020-01-01 2.333333 2.333333 2.333333
2020-01-02 2.500000 2.500000 2.500000
2020-01-03 2.666667 2.666667 2.666667
2020-01-04 2.666667 2.666667 2.666667
2020-01-05 2.600000 2.600000 2.600000
```
"""
checks.assert_numba_func(apply_func_nb)
if on_matrix:
out = nb.expanding_matrix_apply_nb(self.to_2d_array(), minp, apply_func_nb, *args)
else:
out = nb.expanding_apply_nb(self.to_2d_array(), minp, apply_func_nb, *args)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def groupby_apply(self, by: tp.PandasGroupByLike,
apply_func_nb: tp.Union[tp.GroupByApplyFunc, tp.GroupByMatrixApplyFunc],
*args, on_matrix: bool = False, wrap_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.groupby_apply_nb` and
`vectorbt.generic.nb.groupby_matrix_apply_nb` for `on_matrix=True`.
For `by`, see `pd.DataFrame.groupby`.
## Example
```python-repl
>>> mean_nb = njit(lambda i, col, a: np.nanmean(a))
>>> df.vbt.groupby_apply([1, 1, 2, 2, 3], mean_nb)
a b c
1 1.5 4.5 1.5
2 3.5 2.5 2.5
3 5.0 1.0 1.0
>>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
>>> df.vbt.groupby_apply([1, 1, 2, 2, 3], mean_matrix_nb, on_matrix=True)
a b c
1 2.500000 2.500000 2.500000
2 2.833333 2.833333 2.833333
3 2.333333 2.333333 2.333333
```
"""
checks.assert_numba_func(apply_func_nb)
regrouped = self.obj.groupby(by, axis=0, **kwargs)
groups = Dict()
for i, (k, v) in enumerate(regrouped.indices.items()):
groups[i] = np.asarray(v)
if on_matrix:
out = nb.groupby_matrix_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
else:
out = nb.groupby_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
wrap_kwargs = merge_dicts(dict(name_or_index=list(regrouped.indices.keys())), wrap_kwargs)
return self.wrapper.wrap_reduced(out, group_by=False, **wrap_kwargs)
def resample_apply(self, freq: tp.PandasFrequencyLike,
apply_func_nb: tp.Union[tp.GroupByApplyFunc, tp.GroupByMatrixApplyFunc],
*args, on_matrix: bool = False, wrap_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.groupby_apply_nb` and
`vectorbt.generic.nb.groupby_matrix_apply_nb` for `on_matrix=True`.
For `freq`, see `pd.DataFrame.resample`.
## Example
```python-repl
>>> mean_nb = njit(lambda i, col, a: np.nanmean(a))
>>> df.vbt.resample_apply('2d', mean_nb)
a b c
2020-01-01 1.5 4.5 1.5
2020-01-03 3.5 2.5 2.5
2020-01-05 5.0 1.0 1.0
>>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
>>> df.vbt.resample_apply('2d', mean_matrix_nb, on_matrix=True)
a b c
2020-01-01 2.500000 2.500000 2.500000
2020-01-03 2.833333 2.833333 2.833333
2020-01-05 2.333333 2.333333 2.333333
```
"""
checks.assert_numba_func(apply_func_nb)
resampled = self.obj.resample(freq, axis=0, **kwargs)
groups = Dict()
for i, (k, v) in enumerate(resampled.indices.items()):
groups[i] = np.asarray(v)
if on_matrix:
out = nb.groupby_matrix_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
else:
out = nb.groupby_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
out_obj = self.wrapper.wrap(out, group_by=False, index=list(resampled.indices.keys()))
resampled_arr = np.full((resampled.ngroups, self.to_2d_array().shape[1]), np.nan)
resampled_obj = self.wrapper.wrap(
resampled_arr,
index=resampled.asfreq().index,
group_by=False,
**merge_dicts({}, wrap_kwargs)
)
resampled_obj.loc[out_obj.index] = out_obj.values
return resampled_obj
def applymap(self, apply_func_nb: tp.ApplyMapFunc, *args,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.applymap_nb`.
## Example
```python-repl
>>> multiply_nb = njit(lambda i, col, a: a ** 2)
>>> df.vbt.applymap(multiply_nb)
a b c
2020-01-01 1.0 25.0 1.0
2020-01-02 4.0 16.0 4.0
2020-01-03 9.0 9.0 9.0
2020-01-04 16.0 4.0 4.0
2020-01-05 25.0 1.0 1.0
```
"""
checks.assert_numba_func(apply_func_nb)
out = nb.applymap_nb(self.to_2d_array(), apply_func_nb, *args)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def filter(self, filter_func_nb: tp.FilterFunc, *args,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.filter_nb`.
## Example
```python-repl
>>> greater_nb = njit(lambda i, col, a: a > 2)
>>> df.vbt.filter(greater_nb)
a b c
2020-01-01 NaN 5.0 NaN
2020-01-02 NaN 4.0 NaN
2020-01-03 3.0 3.0 3.0
2020-01-04 4.0 NaN NaN
2020-01-05 5.0 NaN NaN
```
"""
checks.assert_numba_func(filter_func_nb)
out = nb.filter_nb(self.to_2d_array(), filter_func_nb, *args)
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
def apply_and_reduce(self, apply_func_nb: tp.ApplyFunc, reduce_func_nb: tp.ReduceFunc,
apply_args: tp.Optional[tuple] = None, reduce_args: tp.Optional[tuple] = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""See `vectorbt.generic.nb.apply_and_reduce_nb`.
## Example
```python-repl
>>> greater_nb = njit(lambda col, a: a[a > 2])
>>> mean_nb = njit(lambda col, a: np.nanmean(a))
>>> df.vbt.apply_and_reduce(greater_nb, mean_nb)
a 4.0
b 4.0
c 3.0
dtype: float64
```
"""
checks.assert_numba_func(apply_func_nb)
checks.assert_numba_func(reduce_func_nb)
if apply_args is None:
apply_args = ()
if reduce_args is None:
reduce_args = ()
out = nb.apply_and_reduce_nb(self.to_2d_array(), apply_func_nb, apply_args, reduce_func_nb, reduce_args)
wrap_kwargs = merge_dicts(dict(name_or_index='apply_and_reduce'), wrap_kwargs)
return self.wrapper.wrap_reduced(out, group_by=False, **wrap_kwargs)
def reduce(self,
reduce_func_nb: tp.Union[
tp.FlatGroupReduceFunc,
tp.FlatGroupReduceArrayFunc,
tp.GroupReduceFunc,
tp.GroupReduceArrayFunc,
tp.ReduceFunc,
tp.ReduceArrayFunc
],
*args,
returns_array: bool = False,
returns_idx: bool = False,
flatten: bool = False,
order: str = 'C',
to_index: bool = True,
group_by: tp.GroupByLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeriesFrame[float]:
"""Reduce by column.
See `vectorbt.generic.nb.flat_reduce_grouped_to_array_nb` if grouped, `returns_array` is True and `flatten` is True.
See `vectorbt.generic.nb.flat_reduce_grouped_nb` if grouped, `returns_array` is False and `flatten` is True.
See `vectorbt.generic.nb.reduce_grouped_to_array_nb` if grouped, `returns_array` is True and `flatten` is False.
See `vectorbt.generic.nb.reduce_grouped_nb` if grouped, `returns_array` is False and `flatten` is False.
See `vectorbt.generic.nb.reduce_to_array_nb` if not grouped and `returns_array` is True.
See `vectorbt.generic.nb.reduce_nb` if not grouped and `returns_array` is False.
Set `returns_idx` to True if values returned by `reduce_func_nb` are indices/positions.
Set `to_index` to False to return raw positions instead of labels.
## Example
```python-repl
>>> mean_nb = njit(lambda col, a: np.nanmean(a))
>>> df.vbt.reduce(mean_nb)
a 3.0
b 3.0
c 1.8
dtype: float64
>>> argmax_nb = njit(lambda col, a: np.argmax(a))
>>> df.vbt.reduce(argmax_nb, returns_idx=True)
a 2020-01-05
b 2020-01-01
c 2020-01-03
dtype: datetime64[ns]
>>> argmax_nb = njit(lambda col, a: np.argmax(a))
>>> df.vbt.reduce(argmax_nb, returns_idx=True, to_index=False)
a 4
b 0
c 2
dtype: int64
>>> min_max_nb = njit(lambda col, a: np.array([np.nanmin(a), np.nanmax(a)]))
>>> df.vbt.reduce(min_max_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max']))
a b c
min 1.0 1.0 1.0
max 5.0 5.0 3.0
>>> group_by = pd.Series(['first', 'first', 'second'], name='group')
>>> df.vbt.reduce(mean_nb, group_by=group_by)
group
first 3.0
second 1.8
dtype: float64
>>> df.vbt.reduce(min_max_nb, name_or_index=['min', 'max'],
... returns_array=True, group_by=group_by)
group first second
min 1.0 1.0
max 5.0 3.0
```
"""
checks.assert_numba_func(reduce_func_nb)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
if flatten:
checks.assert_in(order.upper(), ['C', 'F'])
in_c_order = order.upper() == 'C'
if returns_array:
out = nb.flat_reduce_grouped_to_array_nb(
self.to_2d_array(), group_lens, in_c_order, reduce_func_nb, *args)
else:
out = nb.flat_reduce_grouped_nb(
self.to_2d_array(), group_lens, in_c_order, reduce_func_nb, *args)
if returns_idx:
if in_c_order:
out //= group_lens # flattened in C order
else:
out %= self.wrapper.shape[0] # flattened in F order
else:
if returns_array:
out = nb.reduce_grouped_to_array_nb(
self.to_2d_array(), group_lens, reduce_func_nb, *args)
else:
out = nb.reduce_grouped_nb(
self.to_2d_array(), group_lens, reduce_func_nb, *args)
else:
if returns_array:
out = nb.reduce_to_array_nb(
self.to_2d_array(), reduce_func_nb, *args)
else:
out = nb.reduce_nb(
self.to_2d_array(), reduce_func_nb, *args)
# Perform post-processing
wrap_kwargs = merge_dicts(dict(
name_or_index='reduce' if not returns_array else None,
to_index=returns_idx and to_index,
fillna=-1 if returns_idx else None,
dtype=np.int_ if returns_idx else None
), wrap_kwargs)
return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
def min(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return min of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='min'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(nb.min_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
arr = self.to_2d_array()
if arr.dtype != int and arr.dtype != float:
# bottleneck can't consume other than that
_nanmin = np.nanmin
else:
_nanmin = nanmin
return self.wrapper.wrap_reduced(_nanmin(arr, axis=0), group_by=False, **wrap_kwargs)
def max(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return max of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='max'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(nb.max_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
arr = self.to_2d_array()
if arr.dtype != int and arr.dtype != float:
# bottleneck can't consume other than that
_nanmax = np.nanmax
else:
_nanmax = nanmax
return self.wrapper.wrap_reduced(_nanmax(arr, axis=0), group_by=False, **wrap_kwargs)
def mean(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return mean of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='mean'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(
nb.mean_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
arr = self.to_2d_array()
if arr.dtype != int and arr.dtype != float:
# bottleneck can't consume other than that
_nanmean = np.nanmean
else:
_nanmean = nanmean
return self.wrapper.wrap_reduced(_nanmean(arr, axis=0), group_by=False, **wrap_kwargs)
def median(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return median of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='median'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(nb.median_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
arr = self.to_2d_array()
if arr.dtype != int and arr.dtype != float:
# bottleneck can't consume other than that
_nanmedian = np.nanmedian
else:
_nanmedian = nanmedian
return self.wrapper.wrap_reduced(_nanmedian(arr, axis=0), group_by=False, **wrap_kwargs)
def std(self, ddof: int = 1, group_by: tp.GroupByLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return standard deviation of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='std'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(nb.std_reduce_nb, ddof, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
arr = self.to_2d_array()
if arr.dtype != int and arr.dtype != float:
# bottleneck can't consume other than that
_nanstd = np.nanstd
else:
_nanstd = nanstd
return self.wrapper.wrap_reduced(_nanstd(arr, ddof=ddof, axis=0), group_by=False, **wrap_kwargs)
def sum(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return sum of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='sum'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(nb.sum_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
arr = self.to_2d_array()
if arr.dtype != int and arr.dtype != float:
# bottleneck can't consume other than that
_nansum = np.nansum
else:
_nansum = nansum
return self.wrapper.wrap_reduced(_nansum(arr, axis=0), group_by=False, **wrap_kwargs)
def count(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return count of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='count', dtype=np.int_), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(nb.count_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
return self.wrapper.wrap_reduced(np.sum(~np.isnan(self.to_2d_array()), axis=0), group_by=False, **wrap_kwargs)
def idxmin(self, group_by: tp.GroupByLike = None, order: str = 'C',
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return labeled index of min of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='idxmin'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(
nb.argmin_reduce_nb,
group_by=group_by,
flatten=True,
returns_idx=True,
order=order,
wrap_kwargs=wrap_kwargs
)
obj = self.to_2d_array()
out = np.full(obj.shape[1], np.nan, dtype=object)
nan_mask = np.all(np.isnan(obj), axis=0)
out[~nan_mask] = self.wrapper.index[nanargmin(obj[:, ~nan_mask], axis=0)]
return self.wrapper.wrap_reduced(out, group_by=False, **wrap_kwargs)
def idxmax(self, group_by: tp.GroupByLike = None, order: str = 'C',
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return labeled index of max of non-NaN elements."""
wrap_kwargs = merge_dicts(dict(name_or_index='idxmax'), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(
nb.argmax_reduce_nb,
group_by=group_by,
flatten=True,
returns_idx=True,
order=order,
wrap_kwargs=wrap_kwargs
)
obj = self.to_2d_array()
out = np.full(obj.shape[1], np.nan, dtype=object)
nan_mask = np.all(np.isnan(obj), axis=0)
out[~nan_mask] = self.wrapper.index[nanargmax(obj[:, ~nan_mask], axis=0)]
return self.wrapper.wrap_reduced(out, group_by=False, **wrap_kwargs)
def describe(self, percentiles: tp.Optional[tp.ArrayLike] = None, ddof: int = 1,
group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.describe_reduce_nb`.
For `percentiles`, see `pd.DataFrame.describe`.
## Example
```python-repl
>>> df.vbt.describe()
a b c
count 5.000000 5.000000 5.00000
mean 3.000000 3.000000 1.80000
std 1.581139 1.581139 0.83666
min 1.000000 1.000000 1.00000
25% 2.000000 2.000000 1.00000
50% 3.000000 3.000000 2.00000
75% 4.000000 4.000000 2.00000
max 5.000000 5.000000 3.00000
```
"""
if percentiles is not None:
percentiles = reshape_fns.to_1d_array(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
percentiles = percentiles.tolist()
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.unique(percentiles)
perc_formatted = pd.io.formats.format.format_percentiles(percentiles)
index = pd.Index(['count', 'mean', 'std', 'min', *perc_formatted, 'max'])
wrap_kwargs = merge_dicts(dict(name_or_index=index), wrap_kwargs)
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.reduce(
nb.describe_reduce_nb, percentiles, ddof,
group_by=group_by, flatten=True, returns_array=True,
wrap_kwargs=wrap_kwargs)
return self.reduce(
nb.describe_reduce_nb, percentiles, ddof,
returns_array=True, wrap_kwargs=wrap_kwargs)
def value_counts(self,
normalize: bool = False,
sort_uniques: bool = True,
sort: bool = False,
ascending: bool = False,
dropna: bool = False,
group_by: tp.GroupByLike = None,
mapping: tp.Optional[tp.MappingLike] = None,
incl_all_keys: bool = False,
wrap_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.SeriesFrame:
"""Return a Series/DataFrame containing counts of unique values.
* Enable `normalize` flag to return the relative frequencies of the unique values.
* Enable `sort_uniques` flag to sort uniques.
* Enable `sort` flag to sort by frequencies.
* Enable `ascending` flag to sort in ascending order.
* Enable `dropna` flag to exclude counts of NaN.
* Enable `incl_all_keys` to include all mapping keys, no only those that are present in the array.
Mapping will be applied using `vectorbt.utils.mapping.apply_mapping` with `**kwargs`."""
if mapping is None:
mapping = self.mapping
codes, uniques = pd.factorize(self.obj.values.flatten(), sort=False, na_sentinel=None)
codes = codes.reshape(self.wrapper.shape_2d)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
value_counts = nb.value_counts_nb(codes, len(uniques), group_lens)
if incl_all_keys and mapping is not None:
missing_keys = []
for x in mapping:
if pd.isnull(x) and pd.isnull(uniques).any():
continue
if x not in uniques:
missing_keys.append(x)
value_counts = np.vstack((value_counts, np.full((len(missing_keys), value_counts.shape[1]), 0)))
uniques = np.concatenate((uniques, np.array(missing_keys)))
nan_mask = np.isnan(uniques)
if dropna:
value_counts = value_counts[~nan_mask]
uniques = uniques[~nan_mask]
if sort_uniques:
new_indices = uniques.argsort()
value_counts = value_counts[new_indices]
uniques = uniques[new_indices]
value_counts_sum = value_counts.sum(axis=1)
if normalize:
value_counts = value_counts / value_counts_sum.sum()
if sort:
if ascending:
new_indices = value_counts_sum.argsort()
else:
new_indices = (-value_counts_sum).argsort()
value_counts = value_counts[new_indices]
uniques = uniques[new_indices]
value_counts_pd = self.wrapper.wrap(
value_counts,
index=uniques,
group_by=group_by,
**merge_dicts({}, wrap_kwargs)
)
if mapping is not None:
value_counts_pd.index = apply_mapping(value_counts_pd.index, mapping, **kwargs)
return value_counts_pd
# ############# Resolution ############# #
def resolve_self(self: GenericAccessorT,
cond_kwargs: tp.KwargsLike = None,
custom_arg_names: tp.Optional[tp.Set[str]] = None,
impacts_caching: bool = True,
silence_warnings: bool = False) -> GenericAccessorT:
"""Resolve self.
See `vectorbt.base.array_wrapper.Wrapping.resolve_self`.
Creates a copy of this instance `mapping` is different in `cond_kwargs`."""
if cond_kwargs is None:
cond_kwargs = {}
if custom_arg_names is None:
custom_arg_names = set()
reself = Wrapping.resolve_self(
self,
cond_kwargs=cond_kwargs,
custom_arg_names=custom_arg_names,
impacts_caching=impacts_caching,
silence_warnings=silence_warnings
)
if 'mapping' in cond_kwargs:
self_copy = reself.copy(mapping=cond_kwargs['mapping'])
if not checks.is_deep_equal(self_copy.mapping, reself.mapping):
if not silence_warnings:
warnings.warn(f"Changing the mapping will create a copy of this object. "
f"Consider setting it upon object creation to re-use existing cache.", stacklevel=2)
for alias in reself.self_aliases:
if alias not in custom_arg_names:
cond_kwargs[alias] = self_copy
cond_kwargs['mapping'] = self_copy.mapping
if impacts_caching:
cond_kwargs['use_caching'] = False
return self_copy
return reself
# ############# Stats ############# #
@property
def stats_defaults(self) -> tp.Kwargs:
"""Defaults for `GenericAccessor.stats`.
Merges `vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults` and
`generic.stats` in `vectorbt._settings.settings`."""
from vectorbt._settings import settings
generic_stats_cfg = settings['generic']['stats']
return merge_dicts(
StatsBuilderMixin.stats_defaults.__get__(self),
generic_stats_cfg
)
_metrics: tp.ClassVar[Config] = Config(
dict(
start=dict(
title='Start',
calc_func=lambda self: self.wrapper.index[0],
agg_func=None,
tags='wrapper'
),
end=dict(
title='End',
calc_func=lambda self: self.wrapper.index[-1],
agg_func=None,
tags='wrapper'
),
period=dict(
title='Period',
calc_func=lambda self: len(self.wrapper.index),
apply_to_timedelta=True,
agg_func=None,
tags='wrapper'
),
count=dict(
title='Count',
calc_func='count',
inv_check_has_mapping=True,
tags=['generic', 'describe']
),
mean=dict(
title='Mean',
calc_func='mean',
inv_check_has_mapping=True,
tags=['generic', 'describe']
),
std=dict(
title='Std',
calc_func='std',
inv_check_has_mapping=True,
tags=['generic', 'describe']
),
min=dict(
title='Min',
calc_func='min',
inv_check_has_mapping=True,
tags=['generic', 'describe']
),
median=dict(
title='Median',
calc_func='median',
inv_check_has_mapping=True,
tags=['generic', 'describe']
),
max=dict(
title='Max',
calc_func='max',
inv_check_has_mapping=True,
tags=['generic', 'describe']
),
idx_min=dict(
title='Min Index',
calc_func='idxmin',
agg_func=None,
inv_check_has_mapping=True,
tags=['generic', 'index']
),
idx_max=dict(
title='Max Index',
calc_func='idxmax',
agg_func=None,
inv_check_has_mapping=True,
tags=['generic', 'index']
),
value_counts=dict(
title='Value Counts',
calc_func=lambda value_counts: reshape_fns.to_dict(value_counts, orient='index_series'),
resolve_value_counts=True,
check_has_mapping=True,
tags=['generic', 'value_counts']
)
),
copy_kwargs=dict(copy_mode='deep')
)
@property
def metrics(self) -> Config:
return self._metrics
# ############# Conversion ############# #
def drawdown(self, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Drawdown series."""
out = self.to_2d_array() / nb.expanding_max_nb(self.to_2d_array()) - 1
return self.wrapper.wrap(out, group_by=False, **merge_dicts({}, wrap_kwargs))
@property
def drawdowns(self) -> Drawdowns:
"""`GenericAccessor.get_drawdowns` with default arguments."""
return self.get_drawdowns()
def get_drawdowns(self, group_by: tp.GroupByLike = None, **kwargs) -> Drawdowns:
"""Generate drawdown records.
See `vectorbt.generic.drawdowns.Drawdowns`."""
if group_by is None:
group_by = self.wrapper.grouper.group_by
return Drawdowns.from_ts(self.obj, freq=self.wrapper.freq, group_by=group_by, **kwargs)
def to_mapped(self,
dropna: bool = True,
dtype: tp.Optional[tp.DTypeLike] = None,
group_by: tp.GroupByLike = None,
**kwargs) -> MappedArray:
"""Convert this object into an instance of `vectorbt.records.mapped_array.MappedArray`."""
mapped_arr = self.to_2d_array().flatten(order='F')
col_arr = np.repeat(np.arange(self.wrapper.shape_2d[1]), self.wrapper.shape_2d[0])
idx_arr = np.tile(np.arange(self.wrapper.shape_2d[0]), self.wrapper.shape_2d[1])
if dropna and np.isnan(mapped_arr).any():
not_nan_mask = ~np.isnan(mapped_arr)
mapped_arr = mapped_arr[not_nan_mask]
col_arr = col_arr[not_nan_mask]
idx_arr = idx_arr[not_nan_mask]
return MappedArray(
self.wrapper,
np.asarray(mapped_arr, dtype=dtype),
col_arr,
idx_arr=idx_arr,
**kwargs
).regroup(group_by)
def to_returns(self, **kwargs) -> tp.SeriesFrame:
"""Get returns of this object."""
return self.obj.vbt.returns.from_value(self.obj, **kwargs).obj
# ############# Transformation ############# #
def transform(self, transformer: TransformerT, wrap_kwargs: tp.KwargsLike = None, **kwargs) -> tp.SeriesFrame:
"""Transform using a transformer.
A transformer can be any class instance that has `transform` and `fit_transform` methods,
ideally subclassing `sklearn.base.TransformerMixin` and `sklearn.base.BaseEstimator`.
Will fit `transformer` if not fitted.
`**kwargs` are passed to the `transform` or `fit_transform` method.
## Example
```python-repl
>>> from sklearn.preprocessing import MinMaxScaler
>>> df.vbt.transform(MinMaxScaler((-1, 1)))
a b c
2020-01-01 -1.0 1.0 -1.0
2020-01-02 -0.5 0.5 0.0
2020-01-03 0.0 0.0 1.0
2020-01-04 0.5 -0.5 0.0
2020-01-05 1.0 -1.0 -1.0
>>> fitted_scaler = MinMaxScaler((-1, 1)).fit(np.array([[2], [4]]))
>>> df.vbt.transform(fitted_scaler)
a b c
2020-01-01 -2.0 2.0 -2.0
2020-01-02 -1.0 1.0 -1.0
2020-01-03 0.0 0.0 0.0
2020-01-04 1.0 -1.0 -1.0
2020-01-05 2.0 -2.0 -2.0
```"""
is_fitted = True
try:
check_is_fitted(transformer)
except NotFittedError:
is_fitted = False
if not is_fitted:
result = transformer.fit_transform(self.to_2d_array(), **kwargs)
else:
result = transformer.transform(self.to_2d_array(), **kwargs)
return self.wrapper.wrap(result, group_by=False, **merge_dicts({}, wrap_kwargs))
def zscore(self, **kwargs) -> tp.SeriesFrame:
"""Compute z-score using `sklearn.preprocessing.StandardScaler`."""
return self.scale(with_mean=True, with_std=True, **kwargs)
# ############# Splitting ############# #
def split(self, splitter: SplitterT, stack_kwargs: tp.KwargsLike = None, keys: tp.Optional[tp.IndexLike] = None,
plot: bool = False, trace_names: tp.TraceNames = None, heatmap_kwargs: tp.KwargsLike = None,
**kwargs) -> SplitOutputT:
"""Split using a splitter.
Returns a tuple of tuples, each corresponding to a set and composed of a dataframe and split indexes.
A splitter can be any class instance that has `split` method, ideally subclassing
`sklearn.model_selection.BaseCrossValidator` or `vectorbt.generic.splitters.BaseSplitter`.
`heatmap_kwargs` are passed to `vectorbt.generic.plotting.Heatmap` if `plot` is True,
can be a dictionary or a list per set, for example, to set trace name for each set ('train', 'test', etc.).
`**kwargs` are passed to the `split` method.
!!! note
The datetime-like format of the index will be lost as result of this operation.
Make sure to store the index metadata such as frequency information beforehand.
## Example
```python-repl
>>> from sklearn.model_selection import TimeSeriesSplit
>>> splitter = TimeSeriesSplit(n_splits=3)
>>> (train_df, train_indexes), (test_df, test_indexes) = sr.vbt.split(splitter)
>>> train_df
split_idx 0 1 2
0 0.0 0.0 0
1 1.0 1.0 1
2 2.0 2.0 2
3 3.0 3.0 3
4 NaN 4.0 4
5 NaN 5.0 5
6 NaN NaN 6
7 NaN NaN 7
>>> train_indexes
[DatetimeIndex(['2020-01-01', ..., '2020-01-04'], dtype='datetime64[ns]', name='split_0'),
DatetimeIndex(['2020-01-01', ..., '2020-01-06'], dtype='datetime64[ns]', name='split_1'),
DatetimeIndex(['2020-01-01', ..., '2020-01-08'], dtype='datetime64[ns]', name='split_2')]
>>> test_df
split_idx 0 1 2
0 4 6 8
1 5 7 9
>>> test_indexes
[DatetimeIndex(['2020-01-05', '2020-01-06'], dtype='datetime64[ns]', name='split_0'),
DatetimeIndex(['2020-01-07', '2020-01-08'], dtype='datetime64[ns]', name='split_1'),
DatetimeIndex(['2020-01-09', '2020-01-10'], dtype='datetime64[ns]', name='split_2')]
>>> sr.vbt.split(splitter, plot=True, trace_names=['train', 'test'])
```

"""
total_range_sr = pd.Series(np.arange(len(self.wrapper.index)), index=self.wrapper.index)
set_ranges = list(splitter.split(total_range_sr, **kwargs))
if len(set_ranges) == 0:
raise ValueError("No splits were generated")
idxs_by_split_and_set = list(zip(*set_ranges))
results = []
if keys is not None:
if not isinstance(keys, pd.Index):
keys = pd.Index(keys)
for idxs_by_split in idxs_by_split_and_set:
split_dfs = []
split_indexes = []
for split_idx, idxs in enumerate(idxs_by_split):
split_dfs.append(self.obj.iloc[idxs].reset_index(drop=True))
if keys is not None:
split_name = keys[split_idx]
else:
split_name = 'split_' + str(split_idx)
split_indexes.append( | pd.Index(self.wrapper.index[idxs], name=split_name) | pandas.Index |
# util.py (lciafmt)
# !/usr/bin/env python3
# coding=utf-8
"""
This module contains common functions for processing LCIA methods
"""
import uuid
import os
from os.path import join
import lciafmt
import logging as log
import pandas as pd
import numpy as np
import yaml
import pkg_resources
import subprocess
from esupy.processed_data_mgmt import Paths, FileMeta, load_preprocessed_output,\
write_df_to_file
modulepath = os.path.dirname(os.path.realpath(__file__)).replace('\\', '/')
datapath = modulepath + '/data/'
log.basicConfig(level=log.INFO)
#Common declaration of write format for package data products
write_format = "parquet"
paths = Paths
paths.local_path = os.path.realpath(paths.local_path + "/lciafmt")
outputpath = paths.local_path
pkg = pkg_resources.get_distribution('lciafmt')
try:
git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode(
'ascii')[0:7]
except:
git_hash = None
def set_lcia_method_meta(method_id):
lcia_method_meta = FileMeta
lcia_method_meta.name_data = method_id.get_filename()
lcia_method_meta.tool = pkg.project_name
lcia_method_meta.tool_version = pkg.version
lcia_method_meta.category = method_id.get_path()
lcia_method_meta.ext = write_format
lcia_method_meta.git_hash = git_hash
return lcia_method_meta
def make_uuid(*args: str) -> str:
path = _as_path(*args)
return str(uuid.uuid3(uuid.NAMESPACE_OID, path))
def _as_path(*args: str) -> str:
strings = []
for arg in args:
if arg is None:
continue
strings.append(str(arg).strip().lower())
return "/".join(strings)
def is_non_empty_str(s: str) -> bool:
"""Tests if the given parameter is a non-empty string."""
if not isinstance(s, str):
return False
return s.strip() != ""
def is_empty_str(s: str) -> bool:
if s is None:
return True
if isinstance(s, str):
return s.strip() == ''
else:
return False
def format_cas(cas) -> str:
""" In LCIA method sheets CAS numbers are often saved as numbers. This
function formats such numbers to strings that matches the general
format of a CAS numner. It also handles other cases like None values
etc."""
if cas is None:
return ""
if cas == "x" or cas == "-":
return ""
if isinstance(cas, (int, float)):
cas = str(int(cas))
if len(cas) > 4:
cas = cas[:-3] + "-" + cas[-3:-1] + "-" + cas[-1]
return cas
return str(cas)
def aggregate_factors_for_primary_contexts(df) -> pd.DataFrame:
"""
When factors don't exist for flow categories with only a primary context, like "air", but do
exist for 1 or more categories where secondary contexts are present, like "air/urban", then this
function creates factors for that primary context as an average of the factors from flows
with the same secondary context. NOTE this will overwrite factors if they already exist
:param df: a pandas dataframe for an LCIA method
:return: a pandas dataframe for an LCIA method
"""
#Ignore the following impact categories for generating averages
ignored_categories = ['Land transformation', 'Land occupation',
'Water consumption','Mineral resource scarcity',
'Fossil resource scarcity']
indices = df['Context'].str.find('/')
ignored_list = df['Indicator'].isin(ignored_categories)
i = 0
for k in ignored_list.iteritems():
if k[1]:
indices.update(pd.Series([-1], index=[i]))
i = i + 1
primary_context = []
i = 0
for c in df['Context']:
if indices[i] > 0:
sub = c[0:indices[i]]+"/unspecified"
else:
sub = None
i = i + 1
primary_context.append(sub)
df['Primary Context'] = primary_context
#Subset the df to only include the rows were a primary context was added
df_secondary_context_only = df[df['Primary Context'].notnull()]
#Determine fields to aggregate over. Do not use flow UUID or old context
agg_fields = list(set(df.columns) - {'Context', 'Flow UUID', 'Characterization Factor'})
#drop primary context field from df
df = df.drop(columns=['Primary Context'])
df_secondary_agg = df_secondary_context_only.groupby(agg_fields, as_index=False).agg(
{'Characterization Factor': np.average})
df_secondary_agg = df_secondary_agg.rename(columns={"Primary Context": "Context"})
df = | pd.concat([df, df_secondary_agg], ignore_index=True, sort=False) | pandas.concat |
import os
from datetime import datetime, timedelta
from shutil import copy
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from lighthouse.constants.fields import (
FIELD_COORDINATE,
FIELD_PLATE_BARCODE,
FIELD_RESULT,
FIELD_ROOT_SAMPLE_ID,
FIELD_SOURCE,
)
from lighthouse.helpers.reports import (
add_cherrypicked_column,
delete_reports,
get_cherrypicked_samples,
get_distinct_plate_barcodes,
get_fit_to_pick_samples,
get_new_report_name_and_path,
report_query_window_start,
unpad_coordinate,
)
# ----- get_new_report_name_and_path tests -----
def test_get_new_report_name_and_path(app, freezer):
report_date = datetime.now().strftime("%y%m%d_%H%M")
with app.app_context():
report_name, _ = get_new_report_name_and_path()
assert report_name == f"{report_date}_fit_to_pick_with_locations.xlsx"
# ----- unpad_coordinate tests -----
def test_unpad_coordinate_A01():
assert unpad_coordinate("A01") == "A1"
def test_unpad_coordinate_A1():
assert unpad_coordinate("A1") == "A1"
def test_unpad_coordinate_A10():
assert unpad_coordinate("A10") == "A10"
def test_unpad_coordinate_B01010():
assert unpad_coordinate("B01010") == "B1010"
# ----- delete_reports tests -----
def test_delete_reports(app):
copies_of_reports_folder = "tests/data/reports_copies"
filenames = [
"200716_1345_positives_with_locations.xlsx",
"200716_1618_positives_with_locations.xlsx",
"200716_1640_positives_with_locations.xlsx",
"200716_1641_fit_to_pick_with_locations.xlsx",
"200716_1642_fit_to_pick_with_locations.xlsx",
]
for filename in filenames:
copy(f"{copies_of_reports_folder}/{filename}", f"{app.config['REPORTS_DIR']}/{filename}")
with app.app_context():
delete_reports(filenames)
for filename in filenames:
assert os.path.isfile(f"{app.config['REPORTS_DIR']}/{filename}") is False
# ----- get_cherrypicked_samples tests -----
def test_get_cherrypicked_samples_test_db_connection_close(app):
"""
Test Scenario
- Check that connection is close when we call get_cherrypicked_samples
"""
samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
plate_barcodes = ["123", "456"]
with app.app_context():
with patch("sqlalchemy.create_engine") as mock_sql_engine:
mock_db_connection = Mock()
mock_sql_engine().connect.return_value = mock_db_connection
get_cherrypicked_samples(samples, plate_barcodes)
mock_db_connection.close.assert_called_once()
def test_get_cherrypicked_samples_test_db_connection_close_on_exception(app):
"""
Test Scenario
- Check that connection is close when we call get_cherrypicked_samples
"""
samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
plate_barcodes = ["123", "456"]
with app.app_context():
with patch("sqlalchemy.create_engine") as mock_sql_engine:
with patch(
"pandas.read_sql",
side_effect=Exception("Boom!"),
):
mock_db_connection = Mock()
mock_sql_engine().connect.return_value = mock_db_connection
get_cherrypicked_samples(samples, plate_barcodes)
mock_db_connection.close.assert_called_once()
# Test Scenario
# - Mocking database responses
# - Only the Sentinel query returns matches (No Beckman)
# - No chunking: a single query is made in which all matches are returned
# - No duplication of returned matches
def test_get_cherrypicked_samples_no_beckman(app):
expected = [
pd.DataFrame(
["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]
), # Cherrypicking query response
]
samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
plate_barcodes = ["123", "456"]
with app.app_context():
with patch("sqlalchemy.create_engine", return_value=Mock()):
with patch(
"pandas.read_sql",
side_effect=expected,
):
returned_samples = get_cherrypicked_samples(samples, plate_barcodes)
assert returned_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "MCM001"
assert returned_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "MCM003"
assert returned_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "MCM005"
# Test Scenario
# - Mocking database responses
# - Only the Sentinel queries return matches (No Beckman)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - No duplication of returned matches
def test_get_cherrypicked_samples_chunking_no_beckman(app):
# Note: This represents the results of three different (Sentinel, Beckman) sets of
# database queries, each Sentinel query getting indexed from 0. Do not change the
# indices here unless you have modified the behaviour of the query.
query_results = [
pd.DataFrame(["MCM001"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]), # Cherrypicking query resp.
pd.DataFrame(["MCM003"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]), # Cherrypicking query resp.
pd.DataFrame(["MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]), # Cherrypicking query resp.
]
expected = pd.DataFrame(["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2])
samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
plate_barcodes = ["123", "456"]
with app.app_context():
with patch("sqlalchemy.create_engine", return_value=Mock()):
with patch(
"pandas.read_sql",
side_effect=query_results,
):
returned_samples = get_cherrypicked_samples(samples, plate_barcodes, 2)
pd.testing.assert_frame_equal(expected, returned_samples)
# Test Scenario
# - Actual database responses
# - Only the Sentinel queries return matches (No Beckman)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - Duplication of returned matches across different chunks: duplicates should be filtered out
def test_get_cherrypicked_samples_repeat_tests_no_beckman(app, mlwh_sentinel_cherrypicked, event_wh_data):
# the following come from MLWH_SAMPLE_STOCK_RESOURCE in fixture_data
root_sample_ids = ["root_1", "root_2", "root_1"]
plate_barcodes = ["pb_1", "pb_2", "pb_3"]
# root_1 will match 2 samples, but only one of those will match an event (on Sanger Sample Id)
# therefore we only get 1 of the samples called 'root_1' back (the one on plate 'pb_1')
# this also checks we don't get a duplicate row for root_1 / pb_1, despite it cropped up in 2
# different 'chunks'
expected_rows = [["root_1", "pb_1", "positive", "A1"], ["root_2", "pb_2", "positive", "A1"]]
expected_columns = [FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE]
expected = pd.DataFrame(np.array(expected_rows), columns=expected_columns, index=[0, 1])
with app.app_context():
chunk_size = 2
returned_samples = get_cherrypicked_samples(root_sample_ids, plate_barcodes, chunk_size)
print(returned_samples)
| pd.testing.assert_frame_equal(expected, returned_samples) | pandas.testing.assert_frame_equal |
# encoding: utf-8
from __future__ import print_function
from functools import wraps
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
import seaborn as sns
from . import performance as pfm
import jaqs.util as jutil
DECIMAL_TO_BPS = 10000
DECIMAL_TO_PCT = 100
COLOR_MAP = cm.get_cmap('rainbow') # cm.get_cmap('RdBu')
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
'axes.facecolor': '#F6F6F6',
'axes.edgecolor': '#D3D3D3',
'text.color': '#555555',
'grid.color': '#B1B1B1',
'grid.alpha': 0.3,
# scale
'axes.linewidth': 2.0,
'axes.titlepad': 12,
'grid.linewidth': 1.0,
'grid.linestyle': '-',
# font size
'font.size': 13,
'axes.titlesize': 18,
'axes.labelsize': 14,
'legend.fontsize': 'small',
'lines.linewidth': 2.5,
}
mpl.rcParams.update(MPL_RCPARAMS)
# -----------------------------------------------------------------------------------
# plotting settings
def customize(func):
"""
Decorator to set plotting context and axes style during function call.
"""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with plotting_context(), axes_style():
sns.despine(left=True)
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
def plotting_context(context='notebook', font_scale=1.5, rc=None):
"""
Create signaldigger default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by signal font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5}
is being used and will be added to any
rc passed in, unless explicitly overriden.
Returns
-------
seaborn plotting context
Example
-------
with signaldigger.plotting.plotting_context(font_scale=2):
signaldigger.create_full_report(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
"""Create signaldigger default axes style context.
Under the hood, calls and returns seaborn.axes_style() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
style : str, optional
Name of seaborn style.
rc : dict, optional
Config flags.
Returns
-------
seaborn plotting context
Example
-------
with signaldigger.plotting.axes_style(style='whitegrid'):
signaldigger.create_full_report(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.axes_style(style=style, rc=rc)
class GridFigure(object):
def __init__(self, rows, cols, height_ratio=1.0):
self.rows = rows * 2
self.cols = cols
self.fig = plt.figure(figsize=(14, rows * 7 * height_ratio))
self.gs = gridspec.GridSpec(self.rows, self.cols, wspace=0.1, hspace=0.5)
self.curr_row = 0
self.curr_col = 0
self._in_row = False
def next_row(self):
if self._in_row:
self.curr_row += 2
self.curr_col = 0
self._in_row = False
subplt = plt.subplot(self.gs[self.curr_row: self.curr_row + 2, :])
self.curr_row += 2
return subplt
def next_subrow(self):
if self._in_row:
self.curr_row += 2
self.curr_col = 0
self._in_row = False
subplt = plt.subplot(self.gs[self.curr_row, :])
self.curr_row += 1
return subplt
def next_cell(self):
subplt = plt.subplot(self.gs[self.curr_row: self.curr_row + 2, self.curr_col])
self.curr_col += 1
self._in_row = True
if self.curr_col >= self.cols:
self.curr_row += 2
self.curr_col = 0
self._in_row = False
return subplt
# -----------------------------------------------------------------------------------
# Functions to Plot Tables
def plot_table(table, name=None, fmt=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pd.Series or pd.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
fmt : str, optional
Formatter to use for displaying table elements.
E.g. '{0:.2f}%' for displaying 100 as '100.00%'.
Restores original setting after displaying.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if isinstance(table, pd.DataFrame):
table.columns.name = name
prev_option = | pd.get_option('display.float_format') | pandas.get_option |
import os
import pandas as pd
from sklearn.svm import SVC
from random import shuffle
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
# import seaborn as sb
# total number vps: 13, train: 9, val: 3, test: 1
data_path = "../../source/train_val_test_sets/"
save_path_plots = "../output/plots/classification_with_vps_as_train_val_test/"
files = os.listdir(data_path + "meta_data/")
LABELS = [0, 1, 2]
out_df = pd.DataFrame([], columns=["test", "val", "train", "hp_val_acc", "l4_val_acc", "l4_no_pca_val_acc",
"hp_test_acc", "l4_test_acc", "l4_no_pca_test_acc"])
for vp in files:
test_file = vp
copy = files.copy()
copy.remove(test_file)
shuffle(copy)
train_files = copy[:9]
val_files = copy[9:]
train_hps = []
val_hps = []
test_hps = []
train_l4 = []
val_l4 = []
test_l4 = []
train_l4_no_pca = []
val_l4_no_pca = []
test_l4_no_pca = []
train_labels = []
val_labels = []
test_labels = []
for train_file in train_files:
hp_df = pd.read_csv("{}data/{}".format(data_path, train_file))
l4_df = pd.read_csv("{}l4/{}".format(data_path, train_file))
l4_no_pca_df = pd.read_csv("{}l4_no_pca/{}".format(data_path, train_file))
train_hps.extend(hp_df[["yaw", "pitch", "roll"]].values.tolist())
train_l4.extend(l4_df.values.tolist())
train_l4_no_pca.extend(l4_no_pca_df.values.tolist())
train_labels.extend(hp_df["label"].values.tolist())
for val_file in val_files:
hp_df = pd.read_csv("{}data/{}".format(data_path, val_file))
l4_df = pd.read_csv("{}l4/{}".format(data_path, val_file))
l4_no_pca_df = pd.read_csv("{}l4_no_pca/{}".format(data_path, val_file))
val_hps.extend(hp_df[["yaw", "pitch", "roll"]].values.tolist())
val_l4.extend(l4_df.values.tolist())
val_l4_no_pca.extend(l4_no_pca_df.values.tolist())
val_labels.extend(hp_df["label"].values.tolist())
hp_df = pd.read_csv("{}data/{}".format(data_path, test_file))
l4_df = pd.read_csv("{}l4/{}".format(data_path, test_file))
l4_no_pca_df = pd.read_csv("{}l4_no_pca/{}".format(data_path, test_file))
test_hps.extend(hp_df[["yaw", "pitch", "roll"]].values.tolist())
test_l4.extend(l4_df.values.tolist())
test_l4_no_pca.extend(l4_no_pca_df.values.tolist())
test_labels.extend(hp_df["label"].values.tolist())
# pool labels 2 & 3
train_labels = [2 if i == 3 else int(i) for i in train_labels]
val_labels = [2 if i == 3 else int(i) for i in val_labels]
test_labels = [2 if i == 3 else int(i) for i in test_labels]
# inintialize clfs
hp_clf = SVC(kernel="rbf")
l4_clf = SVC(kernel="rbf")
l4_no_pca_clf = SVC(kernel="rbf")
# train clfs
hp_clf.fit(train_hps, train_labels)
l4_clf.fit(train_l4, train_labels)
l4_no_pca_clf.fit(train_l4_no_pca, train_labels)
# predict
hp_val_preds = hp_clf.predict(val_hps)
l4_val_preds = l4_clf.predict(val_l4)
l4_no_pca_val_preds = l4_no_pca_clf.predict(val_l4_no_pca)
hp_test_preds = hp_clf.predict(test_hps)
l4_test_preds = l4_clf.predict(test_l4)
l4_no_pca_test_preds = l4_no_pca_clf.predict(test_l4_no_pca)
# evaluate predictions
hp_val_acc = accuracy_score(val_labels, hp_val_preds)
l4_val_acc = accuracy_score(val_labels, l4_val_preds)
l4_no_pca_val_acc = accuracy_score(val_labels, l4_no_pca_val_preds)
hp_test_acc = accuracy_score(test_labels, hp_test_preds)
l4_test_acc = accuracy_score(test_labels, l4_test_preds)
l4_no_pca_test_acc = accuracy_score(test_labels, l4_no_pca_test_preds)
out_df = out_df.append(pd.DataFrame([[vp[2:-4],
str([int(x[2:-4]) for x in val_files])[1:-1],
str([int(x[2:-4]) for x in train_files])[1:-1],
round(hp_val_acc, 4), round(l4_val_acc, 4), round(l4_no_pca_val_acc, 4),
round(hp_test_acc, 4), round(l4_test_acc, 4), round(l4_no_pca_test_acc, 4)]],
columns=out_df.columns), ignore_index=True)
# plot confusion matrix
# hp val
cmat = confusion_matrix(val_labels, hp_val_preds, normalize="true", labels=LABELS)
df_cmat = pd.DataFrame(cmat, index=LABELS, columns=LABELS)
# sb.heatmap(df_cmat, annot=True, cmap=sb.cubehelix_palette(n_colors=999, dark=0.3))
plt.imshow(df_cmat, cmap='hot', interpolation='nearest')
plt.ylabel("True Label")
plt.xlabel("Predicted Label")
plt.title("CMat HP val set, Test: {}".format(vp))
plt.savefig(fname=save_path_plots + "cmat_hp_val_{}.png".format(vp))
plt.show()
# l4 val
cmat = confusion_matrix(val_labels, l4_val_preds, normalize="true", labels=LABELS)
df_cmat = pd.DataFrame(cmat, index=LABELS, columns=LABELS)
# sb.heatmap(df_cmat, annot=True, cmap=sb.cubehelix_palette(n_colors=999, dark=0.3))
plt.imshow(df_cmat, cmap='hot', interpolation='nearest')
plt.ylabel("True Label")
plt.xlabel("Predicted Label")
plt.title("CMat l4 val set, Test: {}".format(vp))
plt.savefig(fname=save_path_plots + "cmat_l4_val_{}.png".format(vp))
plt.show()
# l4_no_pca val
cmat = confusion_matrix(val_labels, l4_no_pca_val_preds, normalize="true", labels=LABELS)
df_cmat = pd.DataFrame(cmat, index=LABELS, columns=LABELS)
# sb.heatmap(df_cmat, annot=True, cmap=sb.cubehelix_palette(n_colors=999, dark=0.3))
plt.imshow(df_cmat, cmap='hot', interpolation='nearest')
plt.ylabel("True Label")
plt.xlabel("Predicted Label")
plt.title("CMat l4_no_pca val set, Test: {}".format(vp))
plt.savefig(fname=save_path_plots + "cmat_l4_no_pca_val_{}.png".format(vp))
plt.show()
# hp test
cmat = confusion_matrix(test_labels, hp_test_preds, normalize="true", labels=LABELS)
df_cmat = pd.DataFrame(cmat, index=LABELS, columns=LABELS)
# sb.heatmap(df_cmat, annot=True, cmap=sb.cubehelix_palette(n_colors=999, dark=0.3))
plt.imshow(df_cmat, cmap='hot', interpolation='nearest')
plt.ylabel("True Label")
plt.xlabel("Predicted Label")
plt.title("CMat hp test set, Test: {}".format(vp))
plt.savefig(fname=save_path_plots + "cmat_hp_test_{}.png".format(vp))
plt.show()
# l4 test
cmat = confusion_matrix(test_labels, l4_test_preds, normalize="true", labels=LABELS)
df_cmat = | pd.DataFrame(cmat, index=LABELS, columns=LABELS) | pandas.DataFrame |
# _*_ coding:utf-8 _*_
'''=================================
@Author :tix_hjq
@Date :19-10-30 ไธๅ9:36
================================='''
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import f1_score, r2_score
from numpy.random import random, shuffle
import matplotlib.pyplot as plt
from pandas import DataFrame
from tqdm import tqdm
import lightgbm as lgb
import pandas as pd
import numpy as np
import warnings
import os
import gc
import re
import datetime
import sys
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', 100)
print(os.getcwd())
#----------------------------------------------------
class base_model():
def __init__(self,save_folder,random_state=2048):
print('base model is backend')
self.random_state=random_state
self.save_folder=save_folder
def model_fit(self,X_train,y_train,cate_fea,X_vail,y_vail,is_pred=True,test_data=None,loss=['cross_entropy','binary'],is_classiffy=True,threshold=0.103):
if is_classiffy:
loss=loss[0]
else:
loss=loss[1]
lgb_model = lgb.LGBMRegressor(
num_leaves=40, reg_alpha=1, reg_lambda=0.1, objective=loss,
max_depth=-1, learning_rate=0.05, min_child_samples=5, random_state=self.random_state,
n_estimators=8000, subsample=0.8, colsample_bytree=0.8,is_unbalance=True,
device='gpu'
# n_jobs=-1
)
lgb_model.fit(X_train,y_train,eval_set=[(X_vail,y_vail)],eval_metric='auc',
categorical_feature=cate_fea,
early_stopping_rounds=300,verbose=10)
result_weight=lgb_model.best_score_['valid_0']['auc']
# result_weight=lgb_model.best_score_['training']['binary_logloss']
model_import = | DataFrame() | pandas.DataFrame |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.