prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
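Each row below pairs a truncated code prompt with the single line that completes it and the fully qualified pandas API that line exercises. A minimal sketch of iterating rows in this schema, assuming (purely as an illustration) that they are stored as JSON Lines with the same three fields:

import json

# Hypothetical file name; the actual storage format is not shown in this dump.
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # "prompt" is the truncated code, "completion" the line that finishes it,
        # and "api" the fully qualified pandas call the completion makes.
        print(row["api"], "->", row["completion"][:60])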
import copy
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from data.dataloader import JHULoader
from pytz import timezone
from utils.fitting.loss import Loss_Calculator
from utils.generic.config import read_config
"""
Helper functions for processing different reichlab submissions, processing reichlab ground truth,
comparing reichlab models with the ground truth, and processing and formatting our (Wadhwani AI)
submission and comparing that with the ground truth as well.
"""
def get_mapping(which='location_name_to_code', reichlab_path='../../../covid19-forecast-hub', read_from_github=False):
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
df =
|
pd.read_csv(f'{reichlab_path}/data-locations/locations.csv')
|
pandas.read_csv
|
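The completion above supplies the pd.read_csv call inside get_mapping. A minimal sketch of how the resulting frame might be turned into the name-to-code mapping that the default which='location_name_to_code' suggests, assuming (not shown in the prompt) that locations.csv has location_name and location columns:

import pandas as pd

# Column names are an assumption based on the 'location_name_to_code' default above.
df = pd.read_csv("locations.csv")
location_name_to_code = dict(zip(df["location_name"], df["location"]))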
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 21:47:28 2021
Correlation between the TRM and Brent
Author: <NAME> (cade<EMAIL>)
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
corte=int(input('Cutoff year (greater than 1992): '))
# OIL DATA:
petroleo=pd.read_csv('https://datasource.kapsarc.org/explore/dataset/spot-prices-for-crude-oil-and-petroleum-products/download/?format=csv&timezone=America/Bogota&lang=en&use_labels_for_header=true&csv_separator=%3B',sep=';',usecols=['Date','Brent Spot Price (U.S. Dollars per Barrel)'])
petroleo['Date']=pd.to_datetime(petroleo['Date'],yearfirst=True)
petroleo=petroleo.rename(columns={'Date':'Fecha','Brent Spot Price (U.S. Dollars per Barrel)':'Brent'})
petroleo=petroleo.sort_values(by='Fecha',ascending=True)
petroleo['Año']=pd.DatetimeIndex(petroleo['Fecha']).year
petroleo=petroleo.drop(petroleo[petroleo['Año']<corte].index)
# TRM DATA:
trm=
|
pd.read_csv('https://www.datos.gov.co/api/views/32sa-8pi3/rows.csv',usecols=['VALOR','VIGENCIADESDE'])
|
pandas.read_csv
|
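The script's stated goal is the correlation between the TRM and Brent series, but the prompt is cut off right after the TRM download. A small self-contained sketch of the join-and-correlate step it is presumably heading toward, using toy data in place of the two downloads:

import pandas as pd

# Toy stand-ins for the petroleo and trm frames built above.
petroleo = pd.DataFrame({"Fecha": pd.to_datetime(["2021-01-04", "2021-01-05", "2021-01-06"]),
                         "Brent": [51.1, 53.6, 54.3]})
trm = pd.DataFrame({"Fecha": pd.to_datetime(["2021-01-04", "2021-01-05", "2021-01-06"]),
                    "TRM": [3461.0, 3420.5, 3415.2]})
merged = pd.merge(petroleo, trm, on="Fecha", how="inner")   # align the two series on date
print(merged["Brent"].corr(merged["TRM"]))                  # Pearson correlation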
__author__ = 'saeedamen'
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy
from finmarketpy.util.marketconstants import MarketConstants
from findatapy.util import SwimPool
from findatapy.util import LoggerManager
import pickle
#import zlib
#import lz4framed # conda install -c conda-forge py-lz4framed
import blosc
import pickle
market_constants = MarketConstants()
class Backtest(object):
"""Conducts backtest for strategies trading assets. Assumes we have an input of total returns. Reports historical return statistics
and returns time series.
"""
def __init__(self):
self._pnl = None
self._portfolio = None
return
def calculate_diagnostic_trading_PnL(self, asset_a_df, signal_df, further_df=[], further_df_labels=[]):
"""Calculates P&L table which can be used for debugging purposes,
The table is populated with asset, signal and further dataframes provided by the user, can be used to check signalling methodology.
It does not apply parameters such as transaction costs, vol adjusment and so on.
Parameters
----------
asset_a_df : DataFrame
Asset prices
signal_df : DataFrame
Trade signals (typically +1, -1, 0 etc)
further_df : DataFrame
Further dataframes the user wishes to include in the diagnostic output (typically inputs for the signals)
further_df_labels
Labels to append to the further dataframes
Returns
-------
DataFrame with asset, trading signals and returns of the trading strategy for diagnostic purposes
"""
calculations = Calculations()
asset_rets_df = calculations.calculate_returns(asset_a_df)
strategy_rets = calculations.calculate_signal_returns(signal_df, asset_rets_df)
reset_points = ((signal_df - signal_df.shift(1)).abs())
asset_a_df_entry = asset_a_df.copy(deep=True)
asset_a_df_entry[reset_points == 0] = numpy.nan
asset_a_df_entry = asset_a_df_entry.ffill()
asset_a_df_entry.columns = [x + '_entry' for x in asset_a_df_entry.columns]
asset_rets_df.columns = [x + '_asset_rets' for x in asset_rets_df.columns]
strategy_rets.columns = [x + '_strat_rets' for x in strategy_rets.columns]
signal_df.columns = [x + '_final_signal' for x in signal_df.columns]
for i in range(0, len(further_df)):
further_df[i].columns = [x + '_' + further_df_labels[i] for x in further_df[i].columns]
flatten_df = [asset_a_df, asset_a_df_entry, asset_rets_df, strategy_rets, signal_df]
for f in further_df:
flatten_df.append(f)
return calculations.pandas_outer_join(flatten_df)
def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, run_in_parallel):
"""Calculates P&L of a trading strategy and statistics to be retrieved later
Calculates the P&L for each asset/signal combination and also for the final strategy, applying appropriate
weighting in the portfolio, depending on predefined parameters, for example:
static weighting for each asset
static weighting for each asset + vol weighting for each asset
static weighting for each asset + vol weighting for each asset + vol weighting for the portfolio
Parameters
----------
br : BacktestRequest
Parameters for the backtest specifying start date, finish date, transaction costs etc.
asset_a_df : pandas.DataFrame
Asset prices to be traded
signal_df : pandas.DataFrame
Signals for the trading strategy
contract_value_df : pandas.DataFrame
Daily size of contracts
"""
calculations = Calculations()
risk_engine = RiskEngine()
# # do an outer join first, so can fill out signal and fill it down
# # this captures the case where the signal changes on an asset holiday
# # it will just get delayed till the next tradable day when we do this
# asset_df_2, signal_df_2 = asset_a_df.align(signal_df, join='outer', axis='index')
# signal_df = signal_df_2.fillna(method='ffill')
#
# # now make sure the dates of both traded asset and signal are aligned properly
# # and use as reference only those days where we have asset information
# asset_df, signal_df = asset_a_df.align(signal_df, join='left', axis = 'index')
logger = LoggerManager().getLogger(__name__)
logger.info("Calculating trading P&L...")
signal_df = signal_df.shift(br.signal_delay)
asset_df, signal_df = calculations.join_left_fill_right(asset_a_df, signal_df)
if (contract_value_df is not None):
asset_df, contract_value_df = asset_df.align(contract_value_df, join='left', axis='index')
contract_value_df = contract_value_df.fillna(
method='ffill') # fill down asset holidays (we won't trade on these days)
# non-trading days of the assets (this may of course vary between the assets we are trading
# if they are from different asset classes)
non_trading_days = numpy.isnan(asset_df.values)
# only allow signals to change on the days when we can trade assets
signal_df = signal_df.mask(non_trading_days) # fill asset holidays with NaN signals
signal_df = signal_df.fillna(method='ffill') # fill these down
tc = br.spot_tc_bp
signal_cols = signal_df.columns.values
asset_df_cols = asset_df.columns.values
pnl_cols = []
for i in range(0, len(asset_df_cols)):
pnl_cols.append(asset_df_cols[i] + " / " + signal_cols[i])
asset_df = asset_df.fillna(method='ffill') # fill down asset holidays (we won't trade on these days)
returns_df = calculations.calculate_returns(asset_df)
# apply a stop loss/take profit to every trade if this has been specified
# do this before we start to do vol weighting etc.
if br.take_profit is not None and br.stop_loss is not None:
returns_df = calculations.calculate_returns(asset_df)
# makes assumption that signal column order matches that of returns
temp_strategy_rets_df = calculations.calculate_signal_returns_as_matrix(signal_df, returns_df)
trade_rets_df = calculations.calculate_cum_rets_trades(signal_df, temp_strategy_rets_df)
# pre_signal_df = signal_df.copy()
signal_df = calculations.calculate_risk_stop_signals(signal_df, trade_rets_df, br.stop_loss, br.take_profit)
# make sure we can't trade where asset price is undefined and carry over signal
signal_df = signal_df.mask(non_trading_days) # fill asset holidays with NaN signals
signal_df = signal_df.fillna(method='ffill') # fill these down (when the asset is not trading)
# for debugging purposes
# if True:
# signal_df_copy = signal_df.copy()
# trade_rets_df_copy = trade_rets_df.copy()
#
# asset_df_copy.columns = [x + '_asset' for x in temp_strategy_rets_df.columns]
# temp_strategy_rets_df.columns = [x + '_strategy_rets' for x in temp_strategy_rets_df.columns]
# signal_df_copy.columns = [x + '_final_signal' for x in signal_df_copy.columns]
# trade_rets_df_copy.columns = [x + '_cum_trade' for x in trade_rets_df_copy.columns]
#
# to_plot = calculations.pandas_outer_join([asset_df_copy, pre_signal_df, signal_df_copy, trade_rets_df_copy, temp_strategy_rets_df])
# to_plot.to_csv('test.csv')
# do we have a vol target for individual signals?
if br.signal_vol_adjust is True:
leverage_df = risk_engine.calculate_leverage_factor(returns_df, br.signal_vol_target,
br.signal_vol_max_leverage,
br.signal_vol_periods, br.signal_vol_obs_in_year,
br.signal_vol_rebalance_freq,
br.signal_vol_resample_freq,
br.signal_vol_resample_type,
period_shift=br.signal_vol_period_shift)
signal_df = pandas.DataFrame(
signal_df.values * leverage_df.values, index=signal_df.index, columns=signal_df.columns)
self._individual_leverage = leverage_df # contains leverage of individual signal (before portfolio vol target)
_pnl = calculations.calculate_signal_returns_with_tc_matrix(signal_df, returns_df, tc=tc)
_pnl.columns = pnl_cols
adjusted_weights_matrix = None
# portfolio is average of the underlying signals: should we sum them or average them or use another
# weighting scheme?
if br.portfolio_combination is not None:
if br.portfolio_combination == 'sum' and br.portfolio_combination_weights is None:
portfolio = pandas.DataFrame(data=_pnl.sum(axis=1), index=_pnl.index, columns=['Portfolio'])
elif br.portfolio_combination == 'mean' and br.portfolio_combination_weights is None:
portfolio = pandas.DataFrame(data=_pnl.mean(axis=1), index=_pnl.index, columns=['Portfolio'])
adjusted_weights_matrix = self.create_portfolio_weights(br, _pnl, method='mean')
elif 'weighted' in br.portfolio_combination and isinstance(br.portfolio_combination_weights, dict):
# get the weights for each asset
adjusted_weights_matrix = self.create_portfolio_weights(br, _pnl, method=br.portfolio_combination)
portfolio = pandas.DataFrame(data=(_pnl.values * adjusted_weights_matrix), index=_pnl.index)
is_all_na =
|
pandas.isnull(portfolio)
|
pandas.isnull
|
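The completion above builds an elementwise missing-value mask of the weighted portfolio P&L before the prompt cuts off. A small self-contained illustration of the pattern, assuming the follow-up (not shown) is to flag rows where every column is NaN:

import numpy
import pandas

pnl = pandas.DataFrame({"A / sig": [0.01, numpy.nan, numpy.nan],
                        "B / sig": [0.02, numpy.nan, 0.00]})
is_all_na = pandas.isnull(pnl)        # elementwise NaN mask
rows_all_na = is_all_na.all(axis=1)   # True where the entire row is missing
print(rows_all_na.tolist())           # [False, True, False]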
import argparse
import pandas as pd
from flask import Flask, jsonify, request
from cian_similarity import Model
from cian_similarity.utils import _get_features, category_dummies, features_index
app = Flask(__name__)
model = Model()
@app.route("/predict", methods=["POST"])
def predict():
# TODO: Improve speed using batch processing, not one-by-one
data = request.get_json(force=True)
result = []
for sample in data:
left, right = [_get_features(el) for el in process_request(sample)]
left = left.reindex(features_index)
right = right.reindex(features_index)
x = model.get_residual_inference(left, right)
proba = model.clf.predict_proba(x.values.reshape(1, -1))
result += proba.tolist()
return jsonify(result)
@app.route("/save", methods=["GET"])
def save_model():
model.save("saved_model.pkl")
def process_request(r: str):
row =
|
pd.Series(r)
|
pandas.Series
|
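The Flask handler above turns each JSON sample into a pandas Series and reindexes it against a fixed feature index before scoring. A minimal self-contained sketch of that reindexing pattern (the real features_index comes from cian_similarity.utils and is not shown here):

import pandas as pd

features_index = pd.Index(["rooms", "area", "floor"])   # assumed example index
sample = {"area": 54.0, "rooms": 2}                      # hypothetical request payload
row = pd.Series(sample)
row = row.reindex(features_index)                        # missing features become NaN
print(row.tolist())                                      # [2.0, 54.0, nan]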
import pandas
import os
import ast
def create_CSV_pipeline1(
platename, seriesperwell, path, illum_path, platedict, one_or_many, Channeldict
):
if one_or_many == "one":
print("CSV creation not enabled for Channeldict for one file/well")
return
else:
columns_per_channel = ["PathName_", "FileName_", "Frame_"]
columns = ["Metadata_Plate", "Metadata_Series", "Metadata_Site"]
channels = []
Channeldict = ast.literal_eval(Channeldict)
rounddict = {}
Channelrounds = list(Channeldict.keys())
for eachround in Channelrounds:
templist = []
templist += Channeldict[eachround].values()
channels += list(i[0] for i in templist)
rounddict[eachround] = list(i[0] for i in templist)
df = pandas.DataFrame(columns=columns)
for chan in channels:
listoffiles = []
for round in rounddict.keys():
if chan in rounddict[round]:
for well in platedict.keys():
listoffiles.append(platedict[well][round])
listoffiles = [x for l in listoffiles for x in l]
df["FileName_Orig" + chan] = listoffiles
df["Metadata_Plate"] = [platename] * len(listoffiles)
df["Metadata_Series"] = list(range(seriesperwell)) * len(platedict.keys())
for eachround in Channelrounds:
pathperround = path + eachround + "/"
for chan in channels:
for i in list(Channeldict[eachround].values()):
if chan == i[0]:
df["PathName_Orig" + chan] = pathperround
df["Frame_Orig" + chan] = i[1]
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
# Make .csv for 2_CP_ApplyIllum
df["Metadata_Site"] = df["Metadata_Series"]
well_df_list = []
well_val_df_list = []
for eachwell in platedict.keys():
well_df_list += [eachwell] * seriesperwell
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * seriesperwell
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
for chan in channels:
listoffiles = []
for round in rounddict.keys():
if chan in rounddict[round]:
for well in platedict.keys():
listoffiles.append(platedict[well][round])
listoffiles = [x for l in listoffiles for x in l]
df["PathName_Illum" + chan] = [illum_path] * len(listoffiles)
df["FileName_Illum" + chan] = [platename + "_Illum" + chan + ".npy"] * len(
listoffiles
)
file_out_name_2 = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name_2, index=False)
return file_out_name, file_out_name_2
def create_CSV_pipeline3(platename, seriesperwell, path, well_list, range_skip):
columns = [
"Metadata_Plate",
"Metadata_Site",
"Metadata_Well",
"Metadata_Well_Value",
]
columns_per_channel = ["PathName_", "FileName_"]
channels = ["DNA", "Phalloidin"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = pandas.DataFrame(columns=columns)
sitelist = list(range(0, seriesperwell, range_skip))
sites_per_well = len(sitelist)
total_file_count = sites_per_well * len(well_list)
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = sitelist * len(well_list)
well_df_list = []
well_val_df_list = []
parsed_well_list = []
for eachwell in well_list:
well_df_list += [eachwell] * sites_per_well
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * sites_per_well
parsed_well_list.append(wellval)
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
path_list = [
os.path.join(path, platename + "-" + well)
for well in well_list
for site in sitelist
]
for chan in channels:
df["PathName_" + chan] = path_list
df["FileName_" + chan] = [
"Plate_"
+ platename
+ "_Well_"
+ well
+ "_Site_"
+ str(site)
+ "_Corr"
+ chan
+ ".tiff"
for well in parsed_well_list
for site in sitelist
]
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
return file_out_name
def create_CSV_pipeline5(
platename,
seriesperwell,
expected_cycles,
path,
platedict,
one_or_many,
fast_or_slow,
):
expected_cycles = int(expected_cycles)
columns = ["Metadata_Plate", "Metadata_Site", "Metadata_SBSCycle"]
channels = ["OrigT", "OrigG", "OrigA", "OrigC", "OrigDNA"]
if one_or_many == "one" and fast_or_slow == "fast":
columns_per_channel = ["PathName_", "FileName_", "Series_", "Frame_"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = pandas.DataFrame(columns=columns)
well_list = platedict[1]
total_file_count = seriesperwell * len(well_list) * expected_cycles
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = (
list(range(seriesperwell)) * len(well_list) * expected_cycles
)
cycle_list = []
path_list = []
A_list = []
C_list = []
G_list = []
T_list = []
DNA_list = []
for cycle in range(1, (expected_cycles + 1)):
for eachwell in platedict[cycle]:
cycle_list += [int(cycle)] * seriesperwell
path_list += [
os.path.join(path, platedict[cycle][eachwell][0])
] * seriesperwell
T_list += [platedict[cycle][eachwell][1][0]] * seriesperwell
G_list += [platedict[cycle][eachwell][1][1]] * seriesperwell
A_list += [platedict[cycle][eachwell][1][2]] * seriesperwell
C_list += [platedict[cycle][eachwell][1][3]] * seriesperwell
DNA_list += [platedict[cycle][eachwell][1][4]] * seriesperwell
df["Metadata_SBSCycle"] = cycle_list
for chan in channels:
df["Series_" + chan] = (
list(range(seriesperwell)) * len(well_list) * expected_cycles
)
df["PathName_" + chan] = path_list
df["FileName_OrigT"] = T_list
df["FileName_OrigG"] = G_list
df["FileName_OrigA"] = A_list
df["FileName_OrigC"] = C_list
df["FileName_OrigDNA"] = DNA_list
df["Frame_OrigDNA"] = [0] * total_file_count
df["Frame_OrigG"] = ([1] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
df["Frame_OrigT"] = ([2] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
df["Frame_OrigA"] = ([3] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
df["Frame_OrigC"] = ([4] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
elif one_or_many == "many" and fast_or_slow == "slow":
columns_per_channel = ["PathName_", "FileName_", "Frame_"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = pandas.DataFrame(columns=columns)
well_list = platedict[1]
total_file_count = seriesperwell * len(well_list) * expected_cycles
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = (
list(range(seriesperwell)) * len(well_list) * expected_cycles
)
cycle_list = []
path_list = []
file_list = []
for cycle in range(1, (expected_cycles + 1)):
for eachwell in platedict[cycle]:
cycle_list += [int(cycle)] * seriesperwell
path_list += [
os.path.join(path, platedict[cycle][eachwell][0])
] * seriesperwell
file_list += platedict[cycle][eachwell][1]
df["Metadata_SBSCycle"] = cycle_list
for chan in channels:
df["PathName_" + chan] = path_list
df["FileName_" + chan] = file_list
df["Frame_OrigDNA"] = [0] * total_file_count
df["Frame_OrigG"] = [1] * total_file_count
df["Frame_OrigT"] = [2] * total_file_count
df["Frame_OrigA"] = [3] * total_file_count
df["Frame_OrigC"] = [4] * total_file_count
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
return file_out_name
def create_CSV_pipeline6(
platename,
seriesperwell,
expected_cycles,
path,
illum_path,
platedict,
one_or_many,
fast_or_slow,
):
expected_cycles = int(expected_cycles)
if one_or_many == "one" and fast_or_slow == "fast":
columns = [
"Metadata_Plate",
"Metadata_Series",
"Metadata_Well",
"Metadata_Well_Value",
"Metadata_ArbitraryGroup",
]
columns_per_channel = ["PathName_", "FileName_", "Series_", "Frame_"]
cycles = ["Cycle%02d_" % x for x in range(1, expected_cycles + 1)]
or_il = ["Orig", "Illum"]
channels = ["A", "C", "G", "T", "DNA"]
columns += [
col + cycle + oi + channel
for col in columns_per_channel
for cycle in cycles
for oi in or_il
for channel in channels
]
df =
|
pandas.DataFrame(columns=columns)
|
pandas.DataFrame
|
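The create_CSV_pipeline helpers above all follow the same pattern: start from an empty DataFrame with a fixed column schema, assign each column an equal-length list, and write the result to /tmp. A small self-contained sketch of that pattern:

import pandas

columns = ["Metadata_Plate", "Metadata_Site", "FileName_OrigDNA"]
df = pandas.DataFrame(columns=columns)
df["Metadata_Plate"] = ["Plate1"] * 3                      # first list assignment sets the row count
df["Metadata_Site"] = list(range(3))
df["FileName_OrigDNA"] = ["site_%d.tiff" % i for i in range(3)]
df.to_csv("/tmp/Plate1.csv", index=False)                  # mirrors the file_out_name convention above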
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']
# Preprocessing
df_google =
|
pd.read_csv("https://storage.googleapis.com/covid-external/forecast_JAPAN_PREFECTURE_28.csv")
|
pandas.read_csv
|
import glob
import pandas as pd
import matplotlib.pyplot as plt
import os
import warnings
import scipy.stats as sts
import math
def setdir(path):
os.chdir(path)
def load(name, sep=',', nrows=None):
dfs = [
|
pd.read_csv(file, index_col=False, sep=sep, nrows=nrows)
|
pandas.read_csv
|
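The load helper above fills a list of frames from pd.read_csv inside a list comprehension; the prompt stops before the iterable, but the glob import suggests a file pattern. A minimal sketch of that pattern under that assumption:

import glob
import pandas as pd

# Assumption: the comprehension iterates over files matched by a glob pattern.
files = sorted(glob.glob("data/*.csv"))
dfs = [pd.read_csv(f, index_col=False) for f in files]
combined = pd.concat(dfs, ignore_index=True) if dfs else pd.DataFrame()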
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp =
|
_to_m8(exp)
|
pandas.tseries.index._to_m8
|
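_to_m8 here is a private helper from the old pandas.tseries.index module that converts a Timestamp into a numpy datetime64 so it can be compared elementwise against an array view of the index. A minimal sketch of the same comparison using only public APIs (an equivalent, not the test's own code):

import numpy as np
import pandas as pd

idx = pd.date_range("2011-01-01", periods=10)
arr = np.array(idx)                # datetime64[ns] view of the index
exp = idx[5].to_datetime64()       # public counterpart of _to_m8(idx[5])
print(np.where(arr == exp)[0])     # [5]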
"""
Initial formatting for data from the UK Centre for Ecology and Hydrology.
Data sources:
Windermere north: https://catalogue.ceh.ac.uk/documents/f385b60a-2a6b-432e-aadd-a9690415a0ca
Windermere south: https://catalogue.ceh.ac.uk/documents/e3c4d368-215d-49b2-8e12-74c99c4c3a9d
Grasmere: https://catalogue.ceh.ac.uk/documents/b891c50a-1f77-48b2-9c41-7cc0e8993c50
Esthwaite water: https://catalogue.ceh.ac.uk/documents/87360d1a-85d9-4a4e-b9ac-e315977a52d3
Bassenthwaite: https://catalogue.ceh.ac.uk/documents/91d763f2-978d-4891-b3c6-f41d29b45d55
Belham Tarn: https://catalogue.ceh.ac.uk/documents/393a5946-8a22-4350-80f3-a60d753beb00
Derwent water: https://catalogue.ceh.ac.uk/documents/106844ff-7b4c-45c3-8b4c-7cfb4a4b953b
"""
import pandas as pd
from dplython import DplyFrame, X, sift, select, arrange, mutate
# UK lakes
windermere_north = DplyFrame(pd.read_csv('../../data/NBAS_data_1945_2013.csv', na_values=[""]))
windermere_south = DplyFrame(pd.read_csv('../../data/SBAS_data_1945_2013.csv', na_values=[""]))
grasmere = DplyFrame(pd.read_csv('../../data/GRAS_data_1968_2013.csv', na_values=[""]))
bassenthwaite = DplyFrame(pd.read_csv('../../data/Bass_data_1990_2013.csv', na_values=[""]))
derwentwater = DplyFrame(pd.read_csv('../../data/DERW_data_1990_2013.csv', na_values=[""]))
esthwaite = DplyFrame(pd.read_csv('../../data/ESTH_data_1945_2013.csv', na_values=[""]))
belhalm = DplyFrame(pd.read_csv('../../data/BLEL_data_1945_2013.csv', na_values=[""]))
# make dictionary of lakes missing names and add lake name column to each dataframe
lake_naming_dict = {'Windermere_north': windermere_north, 'Windermere_south': windermere_south,
'Bassenthwaite': bassenthwaite, 'Derwentwater': derwentwater, 'Esthwaite': esthwaite,
'Grasmere': grasmere, 'Belhalm_tarn': belhalm}
counter = 0
# loop through dictionary and add lake column
for key in lake_naming_dict:
name_list = ['Windermere_north', 'Windermere_south', 'Bassenthwaite', 'Derwentwater', 'Esthwaite',
'Grasmere', 'Belhalm_tarn']
lake_naming_dict[key]['lake'] = name_list[counter]
counter = counter + 1
# make UK lakes into a list to format together
UK_df_list = [windermere_north, windermere_south, bassenthwaite, derwentwater, esthwaite, grasmere, belhalm]
UK =
|
pd.concat(UK_df_list, axis=0)
|
pandas.concat
|
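The labelling loop above pairs dictionary keys with a parallel name list by position; since the keys themselves are the intended labels, the same stacking step can be sketched more directly (toy frames stand in for the CSV reads above):

import pandas as pd

lakes = {"Windermere_north": pd.DataFrame({"temp": [7.1, 7.4]}),
         "Grasmere": pd.DataFrame({"temp": [6.8, 6.9]})}
for lake_name, frame in lakes.items():
    frame["lake"] = lake_name                  # the key itself is the label
UK = pd.concat(list(lakes.values()), axis=0)   # stack all lakes into one frame
print(UK["lake"].unique())                     # ['Windermere_north' 'Grasmere']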
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (we may allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with
|
tm.assertRaises(TypeError)
|
pandas.util.testing.assertRaises
|
import pandas as pd
import numpy as np
import requests
from datetime import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn import preprocessing
import lightgbm as lgb
from sklearn.externals import joblib
data = pd.DataFrame(requests.get("https://harrynull.tech/cm/data/export?key=wXw8U3QlS5iEoCh9").json()['data'])
data['datetime'] = data['time'].map(lambda x: datetime.fromtimestamp(x))
data['weekday'] = data['datetime'].map(lambda x: x.weekday())
data['hour'] = data['datetime'].map(lambda x: x.hour)
def vec_cat(data, name):
x_vec_cat = DictVectorizer(sparse=False).fit_transform(data[[name]].fillna('NA').T.to_dict().values())
enc = preprocessing.OneHotEncoder()
enc.fit(x_vec_cat)
return enc.transform(x_vec_cat).toarray()
def train(DC, feature_name, X_test, X_train):
y = DC[feature_name]
y =
|
pd.DataFrame(y)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# In[265]:
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pickle
df = pd.read_csv('data.csv')
df = df.drop(columns=['index', 'district'])
df = df.fillna(0)
df =
|
pd.get_dummies(df, columns=["state"])
|
pandas.get_dummies
|
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mysql_url() -> str:
conn = os.environ["MYSQL_URL"]
return conn
def test_mysql_without_partition(mysql_url: str) -> None:
query = "select * from test_table limit 3"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 3], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_with_partition(mysql_url: str) -> None:
query = "select * from test_table"
df = read_sql(
mysql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 6], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3, 4.4, 5.5, 6.6], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_types(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"),
"test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"),
"test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"),
"test_decimal": pd.Series([1, 2, 3], dtype="float"),
"test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"),
"test_char": pd.Series(["char1", "char2", "char3"], dtype="object")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_types_text(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query, protocol="text")
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"),
"test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"),
"test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"),
"test_decimal": pd.Series([1, 2, 3], dtype="float"),
"test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"),
"test_char": pd.Series(["char1", "char2", "char3"], dtype="object")
}
)
|
assert_frame_equal(df, expected, check_names=True)
|
pandas.testing.assert_frame_equal
|
"""
This program produces a copy of the final dataset filtered for use by the visual.
The code used is a direct interpretation of the SQL files originally used (see rawSQL.sql
in references/).
- <NAME> & jrgarrard
"""
import os
import numpy as np
import pandas as pd
def generate_vis_data_file(logger, input_filepath, processed_filename, labeled_filename, output_filepath):
# Read in data
input_path = os.path.join(input_filepath, processed_filename)
processed_df = pd.read_csv(input_path)
input_path = os.path.join(input_filepath, labeled_filename)
labeled_df =
|
pd.read_csv(input_path)
|
pandas.read_csv
|
from abc import ABC, abstractmethod
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from torchnlp.word_to_vector import GloVe
from sklearn.decomposition import PCA
import numpy as np
import gensim.downloader as api
from gensim.models.word2vec import Word2Vec
from gensim.models import KeyedVectors
from zipfile import ZipFile
import os
""" This Class is an abstract class that represents the Word to Vec interface and abstract methods"""
class W2VCore(ABC):
def __init__(self, core):
super().__init__()
self.w2v = core
@abstractmethod
def get_all_vectors(self):
pass
@abstractmethod
def get_vector(self, token):
pass
def text_to_vectors(self, text):
"""This function gets text and returns a list of vectors from all the types (each unique word) in the text
Args:
text ([list(list(string))]): list of lists of tokens
Returns:
[list(np.array)]: [list of the vectors of the types]
"""
types = set()
for sentence in text:
types |= set(sentence)
vectors = []
for t in types:
vectors.append(self.get_vector(t))
vectors = np.array(vectors)
return vectors.reshape(vectors.shape[0], -1)
def save(self, text, path):
"""This function saves the subset of vectors of the types in the text into a zip file
Args:
text ([list(list(string))]): list of lists of tokens
path ([string]):path to save
"""
types = set()
for sentence in text:
types |= set(sentence)
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
'''
This script is the processing pipeline used to create the "automatic" dataset files shown in DGML.
The automatic datasets are those that are not curated but are created by this script.
There are four steps in this pipeline:
1. Get csv files' paths from dataset_path input parameter
2. Filter csv paths according to a specific_ids text file
3. Load DGF catalog to retrieve info about datasets
4. For each dataset (csv) path, do:
a. Return the csv dataframe and its metadata with load_dataset_wrapper()
b. Create an output folder with the id of the dataset as name
c. Discard datasets not passing the first level constraints
d. Generate pandas profiling. Create file {id}_pandas_profile.html
e. Generate pandas profiling summary. Create file statistics_summary.csv
f. Get data dictionary. Create file dict_data.csv
g. Return the clean dataset before mljar.
h. Discard empty datasets and datasets with fewer than 3 columns. Save
its pandas profile report.
i. For each target variable (column) in the dataset, do:
1. Remove missing values in target variable column
2. Run mljar process. Save output files in {dataset_id}/{automl_target-var}/
3. Compute and store a score to this mljar run with this dataset and with this target var
4. Add a new line to the open_data_ml_datasets.csv file with the info of this dataset
(pandas profile + mljar profile for this run, etc)
Notes:
1. We remove non-ascii characters within the categorical (nominal) values (it does not work if we don't do this)
2. We remove the lines with missing values in the target variable
'''
import glob
import logging
from pathlib import Path
from typing import Union, Optional
import fire
from pandas.util import hash_pandas_object
from dotenv import load_dotenv
import fs
import pandas as pd
from csv_detective.explore_csv import routine
from fs.glob import GlobMatch
from supervised.model_framework import ModelFramework
from src.get_dataset import latest_catalog, info_from_catalog, load_dataset
from src.get_mljar import prepare_for_mljar, generate_mljar
from src.get_statistic_summary import generate_pandas_profiling, get_statistics_summary, get_data_dictionary
from app.apps.utils import slugify
import json
load_dotenv(".env")
logging.root.handlers = []
# noinspection PyArgumentList
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("debug.log", mode="w"),
logging.StreamHandler()
]
)
def get_specific_ids(specific_ids_path: Optional[Path] = None):
"""
If there is a specific_ids_path file, take the ids within and process them
exclusively.
:param specific_ids_path: The path of a text file with id per line
:return: List of read ids from the specific_ids_path file
"""
if specific_ids_path is None or not specific_ids_path.exists():
return
with open(specific_ids_path) as filo:
specific_ids = [l.strip() for l in filo.readlines()]
logging.info(f"We found specific ids. They are: {specific_ids}")
return specific_ids
def create_folder(output_dir):
if not output_dir.exists():
output_dir.mkdir()
def get_mljar_info(output_dir, automl_report):
"""
:param output_dir:
:param automl_report:
:return:
"""
# 1. Move the leaderboard.csv file (automl summary) to the upper level
automl_report.get_leaderboard().to_csv(output_dir.joinpath("leaderboard.csv"), index=False)
# 2. Delete all models (bc they are heavy and we don't use them)
model_file_paths = [Path(p) for p in glob.glob(output_dir.as_posix() + f"/**/learner_fold_*.*", recursive=True)]
model_file_paths = [p for p in model_file_paths if
p.suffix not in [".csv", ".png", ".log", ".svg"]]
for model_path in model_file_paths:
model_path.unlink()
def fill_main_csv(id_, catalog, statistics_summary, output_dir=Path("../app/assets/datasets/"),
target_variable=None, task=None, score='', automl=None):
"""This function adds a new row in dgml_datasets.csv containing info of a chosen dataset."""
main_csv_path = output_dir.joinpath('dgml_datasets.csv')
new_row = {}
dict_main_df = {'title': 'dataset.title', 'dgf_dataset_url': 'dataset.url',
'dgf_dataset_id': 'dataset.id', 'dgf_resource_url': 'url','description':'description'}
for key, item in dict_main_df.items():
new_row[key] = catalog[catalog['id'] == id_][item].values.item()
new_row['nb_lines'] = statistics_summary['Number of lines'][0]
new_row['nb_features'] = statistics_summary['Number of variables'][0]
new_row['profile_url'] = f""
if target_variable is None and task is None:
new_row['automl_url'] = ''
new_row['target_variable'] = ''
new_row['task'] = ''
else:
new_row['automl_url'] = ''
new_row['target_variable'] = target_variable
if "classification" in task.lower():
new_row["task"] = "Classification"
else:
new_row["task"] = task.capitalize()
new_row["is_validated"] = False
new_row["topic"] = "Undefined"
new_row["dict_url"] = ""
new_row['dgf_resource_id'] = id_
new_row['openml_id'] = ""
# add info about best model:
if automl is None:
new_row['best_model'] = ''
new_row['metric_type'] = ''
new_row['metric_value'] = ''
else:
if isinstance(automl._best_model, ModelFramework):
new_row['best_model'] = automl._best_model.get_name()
new_row['metric_type'] = automl._best_model.metric_name
new_row['metric_value'] = automl._best_model.get_final_loss()
else:
new_row['best_model'] = automl._best_model.algorithm_short_name
new_row['metric_type'] = automl._get_eval_metric()
new_row['metric_value'] = automl._best_model.best_loss
if main_csv_path.exists():
main_df = pd.read_csv(main_csv_path)
main_df = main_df.append(new_row, ignore_index=True)
else:
main_df = pd.DataFrame([new_row])
# add score to main csv file:
main_df.loc[
(main_df['dgf_resource_id'] == id_) & (main_df['target_variable'] == target_variable), ['score']] = score
main_df.to_csv(main_csv_path, index=False)
return main_df
def check_constraints(data, parameters):
"""This function filters the datasets in the datasets folder according to the basic constraints defined in the config file
(config/filters.json).
In fact, only datasets that have the following characteristics will be included in DGML's analysis:
* minimal NUMBER OF LINES (param: min_lines)
* maximal number of lines (param: max_lines)
* minimal NUMBER OF COLUMNS (param: min_cols)
* maximal number of columns (param: max_cols)
* minimal LINES/COLUMNS RATIO ; ex. if 10: the dataset contains at least 10 times more
lines than columns (param: lines_cols/ratio)
* the presence of BOTH NUMERICAL AND CATEGORICAL VARIABLES ; if TRUE: the dataset must contain
both categorical and numerical variables (param: num_and_cat)
* the maximum percentage of MISSING VALUES (param:max_missing_values)
"""
passed_constraints = False
nb_lines = len(data)
nb_columns = len(data.columns)
check_categorical = data.select_dtypes(include='object').empty
check_numerical = data.select_dtypes(include=['float64', 'int64']).empty
for param in parameters:
min_lines = float(param['min_lines'])
max_lines = float(param['max_lines'])
min_cols = float(param['min_cols'])
max_cols = float(param['max_cols'])
lines_cols_ratio = float(param['lines_cols/ratio'])
max_missing = float(param['max_missing_values'])
total_nan = data.isna().sum().sum() / (nb_lines * nb_columns)
if (min_lines <= nb_lines <= max_lines) and (min_cols <= nb_columns <= max_cols) and (
(nb_lines / nb_columns) >= lines_cols_ratio) and (
check_categorical is False) and (check_numerical is False) and (total_nan <= max_missing):
passed_constraints = True
return passed_constraints
# TO DO: add these parameters to a config file
def load_dataset_wrapper(dataset_name: Union[Path, str]):
csv_data = None
if isinstance(dataset_name, Path):
try:
csv_data = routine(dataset_name.as_posix(), num_rows=200)
except Exception as e:
logging.exception(f"Dataset {dataset_name}: csv-detective analysis failed")
raise e
encoding = csv_data.get("encoding", "latin-1")
separator = csv_data.get("separator", ",")
dataset_df = pd.read_csv(dataset_name, sep=separator, encoding=encoding)
dataset_df.rename(columns=slugify, inplace=True)
id_data = dataset_name.stem
# Delete file if it is a temp file (in /tmp)
if "/tmp" in dataset_name.as_posix():
dataset_name.unlink()
else:
catalog = latest_catalog() # or fixed_catalog to use our catalog
catalog_info = info_from_catalog(dataset_name, catalog)
dataset_df = load_dataset(id=dataset_name, catalog_info=catalog_info)
id_data = dataset_name
return dataset_df, id_data, csv_data
def get_csv_paths(datasets_path: Path):
def create_files_iterator(remote_globber: GlobMatch):
"""
:param remote_globber:
:return:
"""
temp_path = Path("/tmp").joinpath(Path(remote_globber.path).stem.split("--")[1] + ".csv")
with open(temp_path, "wb") as tmp:
my_fs.download(remote_globber.path, tmp)
return Path(tmp.name)
"""Return the paths of each dataset in the source CSVs folder, whether local or through a sftp connection"""
# 1. If the dataset_path is local, just return the list of csv files
if "sftp" not in datasets_path.as_posix():
if not datasets_path.exists():
raise FileNotFoundError(f"File {datasets_path} not found. Please choose another csv folder")
csv_paths = [Path(p) for p in glob.glob(datasets_path.as_posix() + f"/*.csv", recursive=True)]
else:
# 2. Try to connect to this remote resource (only dealing with sftp)
# noinspection PyBroadException
try:
my_fs = fs.open_fs(datasets_path)
csv_paths = (create_files_iterator(path) for path in my_fs.glob("**/*.csv").__iter__())
except Exception as e:
logging.exception(f"Connecting to {datasets_path} did not work")
raise e
return csv_paths
def generate_score(statistics_summary, columns_to_drop, automl):
"""Returns a score of the evaluating the 'goodness' of a given dataset. Datasets with an higher score will be selected for the app.
The score takes into account:
* the overall percentage of missing values, extracted from the pandas df of statistics_summary.csv (30%)
* the percentage of variables not retained when running mljar (warning columns and columns detected by csv detective) (40%)
* the logloss (for classification) or rmse (for regression) value for the best model (30%), extracted leaderboard.csv """
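# Worked example with hypothetical numbers: 10% missing cells, 2 of 20 variables dropped by mljar,
# and a best-model metric of 0.5 give
# score = 1 / (0.3 * 0.10 + 0.4 * (2 / 20) + 0.3 * 0.5) = 1 / 0.22 ≈ 4.55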
prop_missing = statistics_summary['Percentage of missing cells'] / 100
prop_not_retained = len(columns_to_drop) / statistics_summary['Number of variables']
best_metric = automl.get_leaderboard()['metric_value'].min()
score = 1 / (0.3 * prop_missing + 0.4 * prop_not_retained + 0.3 * best_metric)
return score
def read_parameters(parameters_file: Path):
"""This function reads the config file containing the parameters needed for filtering datasets."""
if parameters_file.exists():
with open(parameters_file) as fout:
parameters = json.load(fout)
else:
raise FileNotFoundError(f"Config file {parameters_file.as_posix()} does not exist.")
return parameters
def main(dataset_path: str,
output_dir: str,
specific_ids_path: str = None,
automl_mode: str = "Explain"):
"""
:param dataset_path: Folder with CSVs files
:param output_dir: Folder were the output is saved
:param specific_ids_path: Path to a text file with a file name per line. Only this files will be treated
:param automl_mode: "perform" ou "explain". Perform maximizes the model performance. Explain maximizes the
explicability of the models.
:return: None
"""
seen_dataframes = set()
output_dir = Path(output_dir)
dataset_path = Path(dataset_path)
dataset_paths = get_csv_paths(dataset_path)
specific_ids = get_specific_ids(specific_ids_path)
automl_mode = automl_mode
catalog = latest_catalog() # or fixed_catalog to use our catalog
for ix, path in enumerate(dataset_paths):
if specific_ids and path.stem not in specific_ids:
logging.warning(f"We are only analysing specific ids. Id {path.stem} is not a specified id. Trying "
f"next...")
continue
try:
data_df, id_data, csv_detective_data = load_dataset_wrapper(path)
df_hash =
|
hash_pandas_object(data_df)
|
pandas.util.hash_pandas_object
|
"""Load the processed spending data."""
from pathlib import Path
from typing import Iterable, Literal
import pandas as pd
from pydantic import validate_arguments
from .summary import ActualDepartmentSpending, BudgetedDepartmentSpending
__all__ = ["load_budgeted_department_spending", "load_actual_department_spending"]
def _load_and_combine_csv_files(files: Iterable[Path]) -> pd.DataFrame:
"""Internal function to load and combine CSV files."""
out = []
for f in sorted(files):
out.append(pd.read_csv(f, dtype={"dept_code": str, "dept_major_code": str}))
return
|
pd.concat(out, ignore_index=True)
|
pandas.concat
|
import os
import h5py
import numpy as np
import pandas as pd
from .DepotInterface import DepotInterface
def pad_with(vector, pad_width, iaxis, kwargs):
pad_value = kwargs.get('padder', '')
vector[:pad_width[0]] = pad_value
vector[-pad_width[1]:] = pad_value
return vector
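# pad_with follows numpy's signature for a callable padding mode, so it is meant to be passed to
# np.pad; a usage sketch with hypothetical data:
# np.pad(np.array(['a', 'b', 'c']), 1, pad_with) -> ['', 'a', 'b', 'c', '']
# np.pad(np.array(['a', 'b', 'c']), 1, pad_with, padder='-') -> ['-', 'a', 'b', 'c', '-']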
# ----------------------------------------------------------------------------------------------------------------------
# DepotHDF5
# ----------------------------------------------------------------------------------------------------------------------
class DepotHDF5(DepotInterface):
def __init__(self, primary_keys: [str], file_path: str, compress: bool = False):
super(DepotHDF5, self).__init__(primary_keys)
self.__file_path = file_path
self.__compress = compress
if len(primary_keys) > 2:
self.log('Warning: DepotHDF5 only supports 2 primary keys.')
# ------------------------------- Basic Operation -------------------------------
def query(self, *args, conditions: dict = None, fields: [str] or None = None, **kwargs) -> pd.DataFrame or None:
try:
f = h5py.File(self.file_path(), 'r')
except Exception as e:
self.log('Open file %s for read fail.' % self.file_path())
return None
finally:
pass
full_conditions = self.full_conditions(*args, conditions=conditions)
result = self.__query_dispatch(f, full_conditions, 0)
return result
def insert(self, dataset: pd.DataFrame or dict or any) -> bool:
pass
def upsert(self, dataset: pd.DataFrame or dict or any) -> bool:
if not self.check_primary_keys(dataset):
return False
if not isinstance(dataset, pd.DataFrame):
self.log('DepotHDF5.upsert() only supports DataFrame')
return False
try:
f = h5py.File(self.file_path(), 'a')
except Exception as e:
self.log('Open file %s for append failed, creating it instead.' % self.file_path())
f = h5py.File(self.file_path(), 'w')
finally:
pass
self.__write_dispatch(f, dataset, 0)
return True
def delete(self, *args, conditions: dict = None, fields: [str] or None = None,
delete_all: bool = False, **kwargs) -> bool:
pass
def drop(self):
try:
if os.path.exists(self.file_path()):
os.remove(self.file_path())
except Exception as e:
self.log('Drop file %s fail.' % self.file_path())
finally:
pass
# ----------------------------- Advanced Operation ------------------------------
def range_of(self, field, *args, conditions: dict = None, **kwargs) -> (any, any):
primary_keys = self.primary_keys()
full_conditions = self.full_conditions(*args, conditions=conditions)
if len(primary_keys) >= 2 and primary_keys[0] not in full_conditions.keys():
self.log('Error: to get the hdf5 depot data range you must specify the level-1 group: ' + primary_keys[0])
return (None, None)
try:
f = h5py.File(self.file_path(), 'r')
except Exception as e:
self.log('Open file %s for read fail.' % self.file_path())
return None
finally:
pass
def record_count(self) -> int:
pass
def distinct_value_of_field(self, field: str) -> [str]:
pass
def all_fields(self) -> [str]:
pass
def remove_field(self, key: str) -> bool:
pass
def rename_field(self, field_old: str, field_new: str) -> bool:
pass
# ----------------------------------------------------------------------------------
def file_path(self) -> str:
return self.__file_path
def __write_dispatch(self, current_group: h5py.Group, df: pd.DataFrame, group_level: int):
# Currently only the first primary key will be the group
if group_level == 0 and len(self.primary_keys()) > 1:
self.__write_process_group(current_group, df, group_level)
else:
self.__write_process_dataset(current_group, df, group_level)
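# Dispatch sketch: with more than one primary key (hypothetically, e.g. ['Identity', 'DateTime']),
# level 0 creates one HDF5 group per distinct value of the first key and the remaining columns are
# written by __write_process_dataset inside that group; with a single primary key the whole
# DataFrame goes straight to __write_process_dataset at the current level.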
def __write_process_group(self, current_group: h5py.Group, df: pd.DataFrame, group_level: int):
primary_keys = self.primary_keys()
group_field = primary_keys[group_level]
df_group = df.groupby(group_field)
for g, d in df_group:
next_group = current_group[g] if g in current_group.keys() else current_group.create_group(g)
self.__write_dispatch(next_group, d.drop(group_field, axis=1), group_level + 1)
def __write_process_dataset(self, current_group: h5py.Group, df: pd.DataFrame, dataset_level: int):
primary_keys = self.primary_keys()
primary_key_fields = primary_keys[dataset_level:]
# Make every column in the same length
padding_dataset, max_len = self.__check_fill_dataset_group_alignment(current_group)
if len(primary_key_fields) == 0 or max_len == 0:
# Just insert
insert_df = df
else:
update_df = df.dropna(subset=primary_key_fields)
exists_df = self.__dataset_group_to_dataframe(current_group)
upsert_df =
|
pd.concat([exists_df, update_df], axis=0)
|
pandas.concat
|
# Do not run this script in one go! The spreadsheet needs manual adjustment in between; execute the code block by block
import numpy as np
from numpy import *
import pandas as pd
df = pd.read_csv('data.csv',encoding='gbk')
# Data cleaning: the required data was adjusted on the EPS platform before import, so there is no erroneous, redundant or duplicate data; only table simplification and missing-value handling are needed
df=df.dropna(how="all")
df=df.drop([0])#delete year
#for i in range(df.shape[0]):
# Since the analysis does not target a specific region, find rows with more than one missing value and drop them
todel=[]
for i in range(df.shape[0]):
sum = 0
for j in range(df.shape[1]):
if pd.isnull(df.iloc[i,j]):
sum+=1
if sum>=2:
todel.append(i)
break
df=df.drop(todel)
# Lagrange interpolation for missing-value imputation
from scipy.interpolate import lagrange
def ploy(s,n,k=6):
y=s[list(range(n-k,n))+list(range(n+1,n+1+k))]# take the k values before and after position n
y=y[y.notnull()]
return lagrange(y.index,list(y))(n)
for i in df.columns:
for j in range(len(df)):
if (df[i].isnull())[j]:
df[i][j]=ploy(df[i],j)
df.to_excel('data222.xls')
# Use the KMO test and Bartlett's test to judge whether factor analysis is appropriate
import numpy as np
import math as math
dataset = pd.read_csv('data222.csv', encoding='gbk')
dataset = dataset.drop(['no','Unnamed: 0'],axis=1)
def corr(data):
return np.corrcoef(dataset)
dataset_corr = corr(dataset)# Pearson's r (Pearson product-moment correlation coefficient); data standardization
tru = pd.read_csv('true.csv', encoding='gbk')# due to precision issues the matrix inverse is computed in MATLAB and imported here
def kmo(dataset_corr, tr):
corr_inv = tr# originally inverted with np.linalg.inv, but precision issues gave wrong results, so the MATLAB-computed inverse is imported instead
nrow_inv_corr, ncol_inv_corr = dataset_corr.shape
A = np.ones((nrow_inv_corr, ncol_inv_corr))# matrix of all ones
for i in range(0, nrow_inv_corr, 1):
for j in range(i, ncol_inv_corr, 1):
A[i, j] = -(corr_inv.iloc[i, j]) / (math.sqrt(corr_inv.iloc[i, i] * corr_inv.iloc[j, j]))
A[j, i] = A[i, j]
dataset_corr = np.asarray(dataset_corr)
kmo_num = np.sum(np.square(dataset_corr)) - np.sum(np.square(np.diagonal(A)))# sum of squares of the correlation matrix minus the sum of squares of the diagonal of A
kmo_denom = kmo_num + np.sum(np.square(A)) - np.sum(np.square(np.diagonal(A)))
kmo_value = kmo_num / kmo_denom
return kmo_value
print(kmo(dataset_corr, tru)) # kmo test
dataset = pd.read_excel('data222.xls',encoding='gbk')
dataset = dataset.drop(['no','Unnamed: 0'],axis=1)
def corr(data):
return np.corrcoef(dataset)
dataset_corr = corr(dataset)
from scipy.stats import bartlett
bartlett(dataset_corr[0],dataset_corr[1],dataset_corr[2],dataset_corr[3],dataset_corr[4],\
dataset_corr[6],dataset_corr[7],dataset_corr[8],dataset_corr[9],dataset_corr[10],dataset_corr[11],dataset_corr[12]\
,dataset_corr[13],dataset_corr[14],dataset_corr[15],dataset_corr[16],dataset_corr[17],dataset_corr[18],dataset_corr[19]\
,dataset_corr[20],dataset_corr[21],dataset_corr[22],dataset_corr[23],dataset_corr[24],dataset_corr[25],dataset_corr[26]\
,dataset_corr[27],dataset_corr[28],dataset_corr[29])#bartlett test
# without the factor_analyzer library; written from first principles
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as nlg
# read the data
mydata = pd.read_csv('data222.csv',encoding="gb2312")
# drop unused columns
mydata=mydata.drop(['no','Unnamed: 0'],axis=1)
# compute the correlation matrix R
R=mydata.corr() # method for the correlation matrix
print("Sample correlation matrix:")
print(R)
# compute the eigenvalues and normalized eigenvectors of R
eig_value, eigvector = nlg.eig(R)
eig = pd.DataFrame()
eig['names'] = mydata.columns
eig['eig_value'] = eig_value
# sort eigenvalues in descending order
eig.sort_values('eig_value', ascending=False, inplace=True)
print("Eigenvalues:")
print(eig_value)
# print("Eigenvectors:")
# print(eigvector)
# find the number of common factors m
print("Number of common factors:")
for m in range(1, 14):
# criterion: the first m eigenvalues account for more than 85% of the total
if eig['eig_value'][:m].sum() / eig['eig_value'].sum() >= 0.85:
print(m)
break
# compute the factor loading matrix of the factor model
A = np.zeros((14,m))
A[:,0] = math.sqrt(eig_value[0]) * eigvector[:,0]
A[:,1] = math.sqrt(eig_value[1]) * eigvector[:,1]
A[:,2] = math.sqrt(eig_value[2]) * eigvector[:,2]
A[:,3] = math.sqrt(eig_value[2]) * eigvector[:,3]
a = pd.DataFrame(A)
a.columns = ['factor1', 'factor2', 'factor3','factor4']
print("因子载荷矩阵(成分矩阵):")
print(a)
#求共同度以及特殊因子方差
h=np.zeros(14)
D=np.mat(np.eye(14))
b=np.mat(np.zeros((4,14)))
for i in range(14):
b=A[i,:]*A[i,:].T # .T is the transpose
h[i]=b[0]
D[i,i] = 1-b[0]
print("共同度(每个因子对公共因子的依赖程度):")
print(h)
print("特殊因子方差:")
print(pd.DataFrame(D))
#求累计方差贡献率
m=np.zeros(4)
for i in range(4):
c=A[:,i].T *A[:,i]
m[i]=c[0]
print("贡献度(每个公共因子对所有因子的影响:")
print(m)
# using the factor_analyzer library
import pandas as pd
import numpy as np
from pandas import DataFrame,Series
from factor_analyzer import FactorAnalyzer
# read the data
data =
|
pd.read_csv('data222.csv',encoding="gb2312")
|
pandas.read_csv
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
import pandas.core.groupby
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
import numpy as np
import ray
from .concat import concat
from .index_metadata import _IndexMetadata
from .utils import _inherit_docstrings, _reindex_helper
@_inherit_docstrings(pandas.core.groupby.DataFrameGroupBy,
excluded=[pandas.core.groupby.DataFrameGroupBy,
pandas.core.groupby.DataFrameGroupBy.__init__])
class DataFrameGroupBy(object):
def __init__(self, df, by, axis, level, as_index, sort, group_keys,
squeeze, **kwargs):
self._columns = df.columns
self._index = df.index
self._axis = axis
self._df = df
self._by = by
self._level = level
self._as_index = as_index
self._sort = sort
self._group_keys = group_keys
self._squeeze = squeeze
self._row_metadata = df._row_metadata
self._col_metadata = df._col_metadata
if axis == 0:
self._partitions = df._block_partitions.T
else:
self._partitions = df._block_partitions
def __getattr__(self, key):
"""Afer regular attribute access, looks up the name in the columns
Args:
key (str): Attribute name.
Returns:
The value of the attribute.
"""
try:
return object.__getattribute__(self, key)
except AttributeError as e:
if key in self._columns:
raise NotImplementedError(
"SeriesGroupBy is not implemented."
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
raise e
_index_grouped_cache = None
@property
def _index_grouped(self):
if self._index_grouped_cache is None:
if self._axis == 0:
self._index_grouped_cache = pandas.Series(
np.zeros(len(self._index), dtype=np.uint8),
index=self._index).groupby(by=self._by, sort=self._sort)
else:
self._index_grouped_cache = pandas.Series(
np.zeros(len(self._columns), dtype=np.uint8),
index=self._columns).groupby(by=self._by, sort=self._sort)
return self._index_grouped_cache
_keys_and_values_cache = None
@property
def _keys_and_values(self):
if self._keys_and_values_cache is None:
self._keys_and_values_cache = \
[(k, v) for k, v in self._index_grouped]
return self._keys_and_values_cache
@property
def _grouped_partitions(self):
# It is expensive to put this multiple times, so let's just put it once
remote_by = ray.put(self._by)
if len(self._index_grouped) > 1:
return zip(*(groupby._submit(args=(remote_by,
self._axis,
self._level,
self._as_index,
self._sort,
self._group_keys,
self._squeeze)
+ tuple(part.tolist()),
num_return_vals=len(
self._index_grouped))
for part in self._partitions))
elif self._axis == 0:
return [self._df._col_partitions]
else:
return [self._df._row_partitions]
@property
def _iter(self):
from .dataframe import DataFrame
if self._axis == 0:
return ((self._keys_and_values[i][0],
DataFrame(col_partitions=part,
columns=self._columns,
index=self._keys_and_values[i][1].index,
col_metadata=self._col_metadata))
for i, part in enumerate(self._grouped_partitions))
else:
return ((self._keys_and_values[i][0],
DataFrame(row_partitions=part,
columns=self._keys_and_values[i][1].index,
index=self._index,
row_metadata=self._row_metadata))
for i, part in enumerate(self._grouped_partitions))
@property
def ngroups(self):
return len(self)
def skew(self, **kwargs):
return self._apply_agg_function(
lambda df: _skew_remote.remote(df, self._axis, kwargs))
def ffill(self, limit=None):
return self._apply_df_function(lambda df: df.ffill(axis=self._axis,
limit=limit))
def sem(self, ddof=1):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def mean(self, *args, **kwargs):
return self._apply_agg_function(
lambda df: _mean_remote.remote(df, self._axis, kwargs, *args))
def any(self):
return self._apply_agg_function(
lambda df: _any_remote.remote(df, self._axis))
@property
def plot(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def ohlc(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def __bytes__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
@property
def tshift(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
@property
def groups(self):
return {k: pandas.Index(v) for k, v in self._keys_and_values}
def min(self, **kwargs):
return self._apply_agg_function(
lambda df: _min_remote.remote(df, self._axis, kwargs))
def idxmax(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
@property
def ndim(self):
return 2 # ndim is always 2 for DataFrames
def shift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def nth(self, n, dropna=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def cumsum(self, axis=0, *args, **kwargs):
return self._apply_df_function(lambda df: df.cumsum(axis,
*args,
**kwargs))
@property
def indices(self):
return dict(self._keys_and_values)
def pct_change(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def filter(self, func, dropna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def cummax(self, axis=0, **kwargs):
return self._apply_df_function(lambda df: df.cummax(axis,
**kwargs))
def apply(self, func, *args, **kwargs):
def apply_helper(df):
return df.apply(func, axis=self._axis, *args, **kwargs)
result = [func(v) for k, v in self._iter]
if self._axis == 0:
if isinstance(result[0], pandas.Series):
# Applied an aggregation function
new_df = concat(result, axis=1).T
new_df.columns = self._columns
new_df.index = [k for k, v in self._iter]
else:
new_df = concat(result, axis=self._axis)
new_df._block_partitions = np.array([_reindex_helper._submit(
args=tuple([new_df.index, self._index, self._axis ^ 1,
len(new_df._block_partitions)]
+ block.tolist()),
num_return_vals=len(new_df._block_partitions))
for block in new_df._block_partitions.T]).T
new_df.index = self._index
new_df._row_metadata = \
_IndexMetadata(new_df._block_partitions[:, 0],
index=new_df.index, axis=0)
else:
if isinstance(result[0], pandas.Series):
# Applied an aggregation function
new_df = concat(result, axis=1)
new_df.columns = [k for k, v in self._iter]
new_df.index = self._index
else:
new_df = concat(result, axis=self._axis)
new_df._block_partitions = np.array([_reindex_helper._submit(
args=tuple([new_df.columns, self._columns, self._axis ^ 1,
new_df._block_partitions.shape[1]]
+ block.tolist()),
num_return_vals=new_df._block_partitions.shape[1])
for block in new_df._block_partitions])
new_df.columns = self._columns
new_df._col_metadata = \
_IndexMetadata(new_df._block_partitions[0, :],
index=new_df.columns, axis=1)
return new_df
@property
def dtypes(self):
if self._axis == 1:
raise ValueError("Cannot call dtypes on groupby with axis=1")
return self._apply_agg_function(lambda df: _dtypes_remote.remote(df))
def first(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def backfill(self, limit=None):
return self.bfill(limit)
def __getitem__(self, key):
# This operation requires a SeriesGroupBy Object
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def cummin(self, axis=0, **kwargs):
return self._apply_df_function(lambda df: df.cummin(axis=axis,
**kwargs))
def bfill(self, limit=None):
return self._apply_df_function(lambda df: df.bfill(axis=self._axis,
limit=limit))
def idxmin(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def prod(self, **kwargs):
return self._apply_agg_function(
lambda df: _prod_remote.remote(df, self._axis, kwargs))
def std(self, ddof=1, *args, **kwargs):
return self._apply_agg_function(
lambda df: _std_remote.remote(df, self._axis, ddof, kwargs, *args))
def aggregate(self, arg, *args, **kwargs):
if self._axis != 0:
# This is not implemented in pandas,
# so we throw a different message
raise NotImplementedError("axis other than 0 is not supported")
if is_list_like(arg):
raise NotImplementedError(
"This requires Multi-level index to be implemented. "
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
return self._apply_agg_function(
lambda df: _agg_remote.remote(df, self._axis, arg, kwargs, *args))
def last(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def mad(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def rank(self):
return self._apply_df_function(lambda df: df.rank(axis=self._axis))
@property
def corrwith(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def pad(self, limit=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def max(self, **kwargs):
return self._apply_agg_function(
lambda df: _max_remote.remote(df, self._axis, kwargs))
def var(self, ddof=1, *args, **kwargs):
return self._apply_agg_function(
lambda df: _var_remote.remote(df, self._axis, ddof, kwargs, *args))
def get_group(self, name, obj=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def __len__(self):
return len(self._index_grouped)
def all(self, **kwargs):
return self._apply_agg_function(
lambda df: _all_remote.remote(df, kwargs))
def size(self):
return self._apply_agg_function(lambda df: _size_remote.remote(df))
def sum(self, **kwargs):
return self._apply_agg_function(
lambda df: _sum_remote.remote(df, self._axis, kwargs))
def __unicode__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def describe(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def boxplot(self, grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None, layout=None, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def ngroup(self, ascending=True):
return self._index_grouped.ngroup(ascending)
def nunique(self, dropna=True):
return self._apply_agg_function(
lambda df: _nunique_remote.remote(df, self._axis, dropna))
def resample(self, rule, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def median(self, **kwargs):
return self._apply_agg_function(
lambda df: _median_remote.remote(df, self._axis, kwargs))
def head(self, n=5):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def cumprod(self, axis=0, *args, **kwargs):
return self._apply_df_function(lambda df: df.cumprod(axis,
*args,
**kwargs))
def __iter__(self):
return self._iter.__iter__()
def agg(self, arg, *args, **kwargs):
return self.aggregate(arg, *args, **kwargs)
def cov(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def transform(self, func, *args, **kwargs):
return self._apply_df_function(lambda df: df.transform(func,
*args,
**kwargs))
def corr(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def fillna(self, **kwargs):
return self._apply_df_function(lambda df: df.fillna(axis=self._axis,
**kwargs))
def count(self, **kwargs):
return self._apply_agg_function(
lambda df: _count_remote.remote(df, self._axis, kwargs))
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
def cumcount(self, ascending=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def tail(self, n=5):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
    # expanding and rolling are unique cases and likely need to be handled
# separately. They do not appear to be commonly used.
def expanding(self, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def rolling(self, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def hist(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def quantile(self, q=0.5, **kwargs):
if is_list_like(q):
raise NotImplementedError(
"This requires Multi-level index to be implemented. "
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
return self._apply_agg_function(
lambda df: _quantile_remote.remote(df, self._axis, q, kwargs))
def diff(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/modin-project/modin.")
def take(self, **kwargs):
return self._apply_df_function(lambda df: df.take(**kwargs))
def _apply_agg_function(self, f, index=None):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group. f must be a remote
function.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "\'{0}\' object is not callable".format(type(f))
blocks = np.array([[f(part) for part in group_of_parts]
for group_of_parts in self._grouped_partitions])
from .dataframe import DataFrame
if self._axis == 0:
return DataFrame(block_partitions=blocks, columns=self._columns,
index=index if index is not None
else [k for k, _ in self._index_grouped])
else:
return DataFrame(block_partitions=blocks.T, index=self._index,
columns=index if index is not None
else [k for k, _ in self._index_grouped])
def _apply_df_function(self, f, concat_axis=None):
assert callable(f), "\'{0}\' object is not callable".format(type(f))
result = [f(v) for k, v in self._iter]
concat_axis = self._axis if concat_axis is None else concat_axis
new_df = concat(result, axis=concat_axis)
if self._axis == 0:
new_df._block_partitions = np.array([_reindex_helper._submit(
args=tuple([new_df.index, self._index, 1,
len(new_df._block_partitions)] + block.tolist()),
num_return_vals=len(new_df._block_partitions))
for block in new_df._block_partitions.T]).T
new_df.index = self._index
new_df._row_metadata = \
_IndexMetadata(new_df._block_partitions[:, 0],
index=new_df.index, axis=0)
else:
new_df._block_partitions = np.array([_reindex_helper._submit(
args=tuple([new_df.columns, self._columns, 0,
new_df._block_partitions.shape[1]]
+ block.tolist()),
num_return_vals=new_df._block_partitions.shape[1])
for block in new_df._block_partitions])
new_df.columns = self._columns
new_df._col_metadata = \
_IndexMetadata(new_df._block_partitions[0, :],
index=new_df.columns, axis=1)
return new_df
@ray.remote
def groupby(by, axis, level, as_index, sort, group_keys, squeeze, *df):
df =
|
pandas.concat(df, axis=axis)
|
pandas.concat
|
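A minimal standalone sketch of the pandas.concat call named in the row above; the two small frames here are illustrative assumptions, not data from the snippet.
import pandas
parts = [pandas.DataFrame({"x": [1, 2]}), pandas.DataFrame({"x": [3, 4]})]
# Stack the pieces back into one frame along the row axis, as the groupby helper does with its per-group parts
combined = pandas.concat(parts, axis=0, ignore_index=True)
print(combined["x"].tolist())  # [1, 2, 3, 4]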
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
|
tm.assert_equal(lhs < NaT, expected)
|
pandas._testing.assert_equal
|
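A minimal sketch of the private pandas._testing.assert_equal helper named in the row above; it dispatches on the input type (Series, Index, ndarray, ...) and raises AssertionError on mismatch. The values are placeholders for illustration.
import pandas as pd
import pandas._testing as tm
# Passes silently because both sides are equal Series
tm.assert_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3]))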
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
obs = filter_table(table, taxonomy, include='c_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_all_features_with_frequency_greater_than_zero_get_filtered(self):
table = pd.DataFrame([[2.0, 0.0], [1.0, 0.0], [9.0, 0.0], [1.0, 0.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# empty - feat2, which is matched by the include term, has a frequency
# of zero in all samples, so all samples end up dropped from the table
with self.assertRaisesRegex(ValueError,
expected_regex='greater than zero'):
filter_table(table, taxonomy, include='dd')
def test_extra_taxon_ignored(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee', 'aa; bb; cc'],
index=pd.Index(['feat1', 'feat2', 'feat3'],
name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
def test_missing_taxon_errors(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc'],
index=pd.Index(['feat1'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, expected_regex='All.*feat2'):
filter_table(table, taxonomy, include='bb')
class FilterSeqs(unittest.TestCase):
def test_filter_no_filters(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_seqs(seqs, taxonomy)
def test_alt_delimiter(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_seqs(seqs, taxonomy, include='cc<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# exclude with delimiter
obs = filter_seqs(seqs, taxonomy, exclude='ww<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
def test_filter_seqs_unknown_mode(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_seqs(seqs, taxonomy, include='bb', mode='not-a-mode')
def test_filter_seqs_include(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='bb')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='cc,ee')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='dd')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='peanut!')
def test_filter_seqs_include_exact_match(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='bb', mode='exact')
def test_filter_seqs_exclude(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, exclude='ab')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='xx')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, exclude='dd')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='dd ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, exclude='cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc')
exp =
|
pd.Series(['ACCC'], index=['feat2'])
|
pandas.Series
|
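A minimal sketch of the pandas.Series constructor named in the row above, reusing the same value and index label that appear in the test.
import pandas as pd
# One sequence keyed by its feature identifier
exp = pd.Series(['ACCC'], index=['feat2'])
print(exp.loc['feat2'])  # ACCC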
# This is an early version of Enumerat.py
import sys
import os
import copy
import time
import json
import pandas as pd
sys.path.append(os.path.abspath('..\\game'))
class Tree(object):
def __init__(self):
self.up = None # tree structure
self.down = None # tree structure
self.layer = None # current layer number
self.state = 'Unsettled'
self.nt = [1, 2, 3, 4, 5, 6, 7, 8, 9] # next possible move
self.board = [1, 2, 3, 4, 5, 6, 7, 8, 9] # current board
self.score_1 = 0.0
self.score_2 = 0.0
def final_chk(board):
print(len(board))
a = 0
for i in range(len(board)):
if board[i].layer == 9:
if board[i].state == 'Unsettled':
a += 1
print('error')
print(a)
def find_all():
def init_obj():
a = Tree()
a.up = []
a.down = []
a.layer = 0
return a
def chk_pos(board, num):
temp = num in range(1, 10)
temp_2 = board[num - 1] not in {'O', 'X'}
return temp and temp_2
def find_num(board):
A = []
for i in range(9):
if chk_pos(board, i + 1):
A.append(i + 1)
return A
def check_board(board):
win_combination = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
(0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6))
count = 0
for a in win_combination:
if board[a[0]] == board[a[1]] == board[a[2]] == "X":
return 'X'
if board[a[0]] == board[a[1]] == board[a[2]] == "O":
return 'O'
for a in range(9):
if board[a] == "X" or board[a] == "O":
count += 1
if count == 9:
return 'Tie'
return 'Unsettled'
A = []
# 0th layer
A.append(init_obj())
A[0].nt = find_num(A[0].board)
    A[0].state = check_board(A[0].board)
# 1st layer
for i in A[0].nt:
temp = init_obj()
temp.layer = 1
temp.up = 0
temp.board[i - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if i == 0:
continue
else:
if A[i].up == 0:
A[0].down.append(i)
# 2nd layer
for i in A[0].down:
for j in A[i].nt:
temp = init_obj()
temp.layer = 2
temp.up = i
temp.board = copy.deepcopy(A[i].board)
temp.board[j - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if A[i].layer == 0 or A[i].layer == 1:
continue
else:
if A[i].layer == 2:
A[A[i].up].down.append(i)
# 3rd layer
for i in range(len(A)):
if A[i].layer == 1:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 3
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if A[i].layer == 3:
A[A[i].up].down.append(i)
# 4th layer
for i in range(len(A)):
if A[i].layer == 2:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 4
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if A[i].layer == 4:
A[A[i].up].down.append(i)
# 5th layer
for i in range(len(A)):
if A[i].layer == 3:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 5
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 5:
A[A[i].up].down.append(i)
# 6th layer
for i in range(len(A)):
if A[i].layer == 4:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 6
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 6:
A[A[i].up].down.append(i)
# 7th layer
for i in range(len(A)):
if A[i].layer == 5:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 7
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 7:
A[A[i].up].down.append(i)
# 8th layer
for i in range(len(A)):
if A[i].layer == 6:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 8
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 8:
A[A[i].up].down.append(i)
# 9th layer
for i in range(len(A)):
if A[i].layer == 7:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 9
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 9:
A[A[i].up].down.append(i)
return A
def score(A):
for i in range(len(A)):
if A[i].layer == 9:
if A[i].state == 'X':
A[i].score_1 = 1
elif A[i].state == 'O':
A[i].score_2 = 1
else:
pass
for k in [8, 7, 6, 5, 4, 3, 2, 1, 0]:
for i in range(len(A)):
if A[i].layer == k:
if A[i].state == 'X':
A[i].score_1 = 1
elif A[i].state == 'O':
A[i].score_2 = 1
else:
temp_1 = 0
temp_2 = 0
for j in A[i].down:
temp_1 = temp_1 + A[j].score_1
temp_2 = temp_2 + A[j].score_2
A[i].score_1 = temp_1 / len(A[i].down)
A[i].score_2 = temp_2 / len(A[i].down)
return A
def to_json(A):
O = {}
for i in range(len(A)):
O[i] = {
'up': A[i].up,
'down': A[i].down,
'layer': A[i].layer,
'state': A[i].state,
'nt': A[i].nt,
'board': A[i].board,
'score_1': A[i].score_1,
'score_2': A[i].score_2}
with open('assets//data.json', 'w') as outfile:
json.dump(O, outfile)
def to_df(A):
    O = {
        'up': [0 for i in range(len(A))],
        'down': [0 for i in range(len(A))],
        'layer': [0 for i in range(len(A))],
        'state': [0 for i in range(len(A))],
        'nt': [0 for i in range(len(A))],
        'board': [0 for i in range(len(A))],
        'score_1': [0 for i in range(len(A))],
        'score_2': [0 for i in range(len(A))]}
for i in range(len(A)):
O['up'][i] = A[i].up
O['down'][i] = A[i].down
O['layer'][i] = A[i].layer
O['state'][i] = A[i].state
O['nt'][i] = A[i].nt
O['board'][i] = A[i].board
O['score_1'][i] = A[i].score_1
O['score_2'][i] = A[i].score_2
P =
|
pd.DataFrame(data=O)
|
pandas.DataFrame
|
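A minimal sketch of the pandas.DataFrame constructor named in the row above, built from a dict of equal-length lists as the to_df helper does; the column names and values here are placeholder assumptions.
import pandas as pd
O = {'layer': [0, 1, 1], 'state': ['Unsettled', 'X', 'Tie']}
P = pd.DataFrame(data=O)  # one row per list position, one column per key
print(P.shape)  # (3, 2)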
#following tutorial: https://www.tensorflow.org/alpha/tutorials/keras/basic_regression
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
dataSetPath = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataSetPath
columnNames = ["MPG", "Cylinders", "Displacement", "Horsepower", "Weight", "Acceleration",
"Model Year", "Origin"]
rawDataset = pd.read_csv(dataSetPath, names = columnNames, na_values = "?",
comment = "\t", sep = " ", skipinitialspace = True)
dataset = rawDataset.copy()
print(dataset.tail())
dataset.isna().sum()
dataset = dataset.dropna()
origin = dataset.pop("Origin")
dataset["USA"] = (origin == 1) * 1.0
dataset["Europe"] = (origin == 2) * 1.0
dataset["Japan"] = (origin == 3) * 1.0
print(dataset.tail())
trainDataSet = dataset.sample(frac = 0.8, random_state = 0)
testDataSet = dataset.drop(trainDataSet.index)
sns.pairplot(trainDataSet[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind = "kde")
plt.show()
trainStats = trainDataSet.describe()
trainStats.pop("MPG")
trainStats = trainStats.transpose()
print(trainStats)
trainLabels = trainDataSet.pop("MPG")
testLabels = testDataSet.pop("MPG")
def norm(X):
    return (X - trainStats["mean"]) / trainStats["std"]
normTrainData = norm(trainDataSet)
normTestData = norm(testDataSet)
def buildModel():
model = keras.Sequential([
layers.Dense(64, activation = "relu", input_shape = [len(trainDataSet.keys())]),
layers.Dense(64, activation = "relu"),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss="mse",optimizer=optimizer,metrics = ["mae","mse"])
return model
model = buildModel()
model.summary()
exampleBatch = normTrainData[:10]
exampleResult = model.predict(exampleBatch)
print("example result",exampleResult)
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs):
if epoch % 100 == 0: print(" ")
print(".",end="")
EPOCHS = 10000
history = model.fit(normTrainData, trainLabels, epochs = EPOCHS,
validation_split = 0.2, verbose = 0, callbacks = [PrintDot()])
hist =
|
pd.DataFrame(history.history)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_lattice():
df = pd.DataFrame(data_lattice)
filename = os.path.join(
testdir, "tabula/icdar2013-dataset/competition-dataset-us/us-030.pdf"
)
tables = camelot.read_pdf(filename, pages="2")
assert_frame_equal(df, tables[0].df)
def test_lattice_table_rotated():
df = pd.DataFrame(data_lattice_table_rotated)
filename = os.path.join(testdir, "clockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
def test_lattice_two_tables():
df1 = pd.DataFrame(data_lattice_two_tables_1)
df2 = pd.DataFrame(data_lattice_two_tables_2)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename)
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
def test_lattice_table_regions():
df = pd.DataFrame(data_lattice_table_regions)
filename = os.path.join(testdir, "table_region.pdf")
tables = camelot.read_pdf(filename, table_regions=["170,370,560,270"])
|
assert_frame_equal(df, tables[0].df)
|
pandas.testing.assert_frame_equal
|
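A minimal sketch of pandas.testing.assert_frame_equal, the public comparison helper named in the row above; the frames are placeholders for illustration.
import pandas as pd
from pandas.testing import assert_frame_equal
left = pd.DataFrame({'a': [1, 2]})
right = pd.DataFrame({'a': [1, 2]})
assert_frame_equal(left, right)  # raises AssertionError if the frames differ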
import os
import numpy as np
import pandas as pd
from pandas.core.common import array_equivalent
from plio.utils.utils import file_search
# This function reads the lookup tables used to expand metadata from the file names
# This is separated from parsing the filenames so that for large lists of files the
# lookup tables don't need to be read over and over
#
# Info in the tables is stored in a dict of dataframes so that only one variable
# (the dict) needs to be passed between functions
def read_refdata(LUT_files):
ID_info = pd.read_csv(LUT_files['ID'], index_col=0)
spectrometer_info = pd.read_csv(LUT_files['spect'], index_col=0)
# spectrometer_info.reset_index(inplace=True)
laser_info = pd.read_csv(LUT_files['laser'], index_col=0)
# laser_info.reset_index(inplace=True)
exp_info = pd.read_csv(LUT_files['exp'], index_col=0)
# exp_info.reset_index(inplace=True)
sample_info = pd.read_csv(LUT_files['sample'], index_col=0)
# sample_info.reset_index(inplace=True)
refdata = {'spect': spectrometer_info, 'laser': laser_info, 'exp': exp_info, 'sample': sample_info, 'ID': ID_info}
return refdata
# This function parses the file names to record metadata related to the observation
def jsc_filename_parse(filename, refdata):
filename = os.path.basename(filename) # strip the path off of the file name
filename = filename.split('_') # split the file name on underscores
libs_ID = filename[0]
laserID = filename[4][0]
expID = filename[5]
spectID = filename[6]
try:
sampleID = refdata['ID'].loc[libs_ID].values[0]
file_info = pd.DataFrame(refdata['sample'].loc[sampleID])
if file_info.columns.shape[0] < file_info.index.shape[0]:
file_info = file_info.T
if file_info.index.shape[0] > 1:
print('More than one matching row for ' + sampleID + '!')
tempID = 'Unknown'
file_info = pd.DataFrame(refdata['sample'].loc[tempID])
if file_info.columns.shape[0] < file_info.index.shape[0]:
file_info = file_info.T
except:
sampleID = 'Unknown'
file_info = pd.DataFrame(refdata['sample'].loc[sampleID])
if file_info.columns.shape[0] < file_info.index.shape[0]:
file_info = file_info.T
file_info['Sample ID'] = sampleID
file_info['LIBS ID'] = libs_ID
file_info.reset_index(level=0, inplace=True, drop=True)
file_info['loc'] = int(filename[1])
file_info['lab'] = filename[2]
file_info['gas'] = filename[3][0]
file_info['pressure'] = float(filename[3][1:])
if laserID in refdata['laser'].index:
laser_info = pd.DataFrame(refdata['laser'].loc[laserID]).T
laser_info.index.name = 'Laser Identifier'
laser_info.reset_index(level=0, inplace=True)
file_info = pd.concat([file_info, laser_info], axis=1)
file_info['laser_power'] = float(filename[4][1:])
if expID in refdata['exp'].index:
exp_info =
|
pd.DataFrame(refdata['exp'].loc[expID])
|
pandas.DataFrame
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# ### Imports
# SEE IN PROJECT 2, baseEMD.py contains new ISSI Structure
# %%
BASE_PATH = "/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/"
from sys import path
path.append(f"{BASE_PATH}Scripts/")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os import makedirs
from EMDComparison import LcurveSolOEMD as lcv
from datetime import datetime, timedelta
import PyEMD
from EMDComparison.signalHelpers import Signal, SignalFunctions, compareTS, new_plot_format, plot_super_summary
from astropy.convolution import Box1DKernel, convolve
from sunpy.time import parse_time
import idlsave
from collections import namedtuple
# ### General Setup of lightcurves
# Set the unsafe, target safe, and dataFolder
unsafe_dir = "/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/unsafe/"
saveFolder = f"{unsafe_dir}ISSI/"
dataFolder = f"/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/Scripts/ISSI/data/"
# Parameters for DELETION, showing FIG
DELETE = False
SHOWFIG = True
# We set a large possible set of periodicities
PeriodMinMax = [5, 20]
makedirs(saveFolder, exist_ok=True)
# IN SITU DATA
df_is = pd.read_csv(f"{dataFolder}small_ch_in_situ.csv")
df_is.index = pd.to_datetime(df_is["Time"])
del df_is["Time"]
insituParams = ["Vr", "Mf", "Np", "T", "Br"]
df_is = df_is[insituParams]
# REMOTE DATA
rs_171 = idlsave.read(f'{dataFolder}small_ch_171_lc_in.sav', verbose=False)
rs_193 = idlsave.read(f'{dataFolder}small_ch_193_lc_in.sav', verbose=False)
ch_flux = idlsave.read(f'{dataFolder}chflux.sav', verbose=False)
# 171 and 193 observations
time_array = rs_171.date_obs_171.copy()
time_array = [t.decode() for t in list(time_array)]
df_171 = pd.DataFrame(
{
'plume': rs_171.lc_171_plume_in,
'cbpoint': rs_171.lc_171_bp_in,
'chplume': rs_171.lc_171_ch_plume_in,
'chole': rs_171.lc_171_ch_in,
'qsun': rs_171.lc_171_qs_in,
},
index=pd.to_datetime(time_array))
df_193 = pd.DataFrame(
{
'plume': rs_193.lc_193_plume_in,
'cbpoint': rs_193.lc_193_bp_in,
'chplume': rs_193.lc_193_ch_plume_in,
'chole': rs_193.lc_193_ch_in,
'qsun': rs_193.lc_193_qs_in,
},
index=
|
pd.to_datetime(time_array)
|
pandas.to_datetime
|
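A minimal sketch of pandas.to_datetime as named in the row above, turning ISO date strings into a DatetimeIndex suitable for a DataFrame index; the timestamps are placeholders.
import pandas as pd
time_array = ['2020-05-28T00:00:00', '2020-05-28T00:01:00']
idx = pd.to_datetime(time_array)
print(idx.dtype)  # datetime64[ns]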
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# KNN (K Nearest Neighbors) Algorithm
# pros:
# 1. High accuracy (with balanced dataset)
# 2. Easy to understand
# cons:
# 1. Time complexity O(n)
# 2. Consumes a large amount of memory
# 3. Cannot handle unbalanced datasets well
class KNN():
'''
KNN algorithm
'''
def __init__(self, k):
'''
Args:
k(int): The nearest k instances
'''
self.k = k
def train_data_loader(self, train_path, label_name='Species'):
'''
Load training dataset
Args:
train_path(string): File path of training dataset
label_name(string): Label name of the given dataset
'''
train_csv = pd.read_csv(
train_path, header=-1, names=CSV_COLUMN_NAMES).sample(frac=1).reset_index(drop=True)
# Split the loaded training dataset into features and labels
train_fs, self.train_ls = train_csv, train_csv.pop(label_name)
# Normalize features
self.norm_train_fs = (train_fs - train_fs.min()) / \
(train_fs.max() - train_fs.min())
return self.norm_train_fs, self.train_ls
def test_data_loader(self, test_path, label_name='Species'):
'''
Load testing dataset
Args:
test_path(string): File path of testing dataset
label_name(string): Label name of the given dataset
'''
test_csv = pd.read_csv(
test_path, header=-1, names=CSV_COLUMN_NAMES).sample(frac=1).reset_index(drop=True)
# Split the loaded testing dataset into features and labels
test_fs, self.test_ls = test_csv, test_csv.pop(label_name)
# Normalize features
self.norm_test_fs = (test_fs - test_fs.min()) / \
(test_fs.max() - test_fs.min())
return self.norm_test_fs, self.test_ls
def pred(self, test_f):
'''
Predict the label of a single testing instance
Args:
test_f(<numpy.ndarray>): Feature values of one testing instance
'''
feat_dist = []
# Calculate the feature distances of the given data point `test_f`
# from every instance in the normalized training dataset
for f in self.norm_train_fs.values:
feat_dist.append(sum(map(abs, f - test_f)))
# Binding feature distances with training labels
_ =
|
pd.DataFrame({"F": feat_dist, "L": self.train_ls})
|
pandas.DataFrame
|
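# --- Editorial aside -----------------------------------------------------------------
# The `pred` method above is cut off right after pairing distances with labels. A
# minimal sketch of how it might continue, assuming a simple majority vote over the
# k nearest training labels (this continuation is an assumption, not the author's code):
import pandas as pd

feat_dist = [0.9, 0.1, 0.3, 0.2, 0.8]
train_ls = pd.Series(["setosa", "virginica", "virginica", "setosa", "setosa"])
k = 3
pairs = pd.DataFrame({"F": feat_dist, "L": train_ls})
nearest = pairs.nsmallest(k, "F")         # k rows with the smallest distances
prediction = nearest["L"].mode().iloc[0]  # majority label among the neighbours
# prediction -> 'virginica' (distances 0.1, 0.2, 0.3 map to virginica, setosa, virginica)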
from pydp.algorithms import laplacian as dp
import numpy as np
import pandas as pd
import time
import os
import psutil
from utils import *
epsilon = pd.read_pickle('~/publication/files/epsilon.pkl')
library_name = 'pydp'
def openmind_pydp_real_dataset(dataset_folder_path, attribute, query_name, number_of_experiments):
# adult dataset
if attribute == 'age':
df_adult = pd.read_csv(dataset_folder_path+"adult.data", sep=',', header=None)
df = df_adult.iloc[:,0]
maximum = 100.0
minimum = 0.0
i = 1
if attribute == 'hrs':
df_adult = pd.read_csv(dataset_folder_path+"adult.data", sep=',', header=None)
df_hrs = df_adult.iloc[:,12]
df = np.clip(df_hrs, a_max=80, a_min=None)
maximum = max(df)
minimum = 0.0
i = 2
# education dataset
if attribute == 'absences':
df1 = pd.read_csv(dataset_folder_path+"student-mat.csv", sep=";")
df2 =
|
pd.read_csv(dataset_folder_path+"student-por.csv", sep=";")
|
pandas.read_csv
|
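# --- Editorial aside -----------------------------------------------------------------
# Not from the source: the clipping above bounds each record's contribution. For
# intuition, a plain-NumPy Laplace mechanism for a bounded mean looks roughly like the
# sketch below; the file itself relies on pydp's implementations rather than this.
import numpy as np

def laplace_mean_sketch(values, eps, lower, upper):
    clipped = np.clip(values, lower, upper)
    sensitivity = (upper - lower) / len(clipped)  # L1 sensitivity of the bounded mean
    return clipped.mean() + np.random.laplace(0.0, sensitivity / eps)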
# Copyright 2019 <NAME> GmbH
# Copyright 2019 Apex.AI, Inc.
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS data model utils."""
from typing import Any
from typing import List
from typing import Mapping
from typing import Optional
from typing import Union
import numpy as np
from pandas import concat
from pandas import DataFrame
from . import DataModelUtil
from ..data_model.ros2 import Ros2DataModel
from ..processor.ros2 import Ros2Handler
class Ros2DataModelUtil(DataModelUtil):
"""ROS 2 data model utility class."""
def __init__(
self,
data_object: Union[Ros2DataModel, Ros2Handler],
) -> None:
"""
Create a Ros2DataModelUtil.
:param data_object: the data model or the event handler which has a data model
"""
super().__init__(data_object)
@property
def data(self) -> Ros2DataModel:
return super().data # type: ignore
def _prettify(
self,
original: str,
) -> str:
"""
Process symbol to make it more readable.
* remove std::allocator
* remove std::default_delete
* bind object: remove placeholder
:param original: the original symbol
:return: the prettified symbol
"""
pretty = original
# remove spaces
pretty = pretty.replace(' ', '')
# allocator
std_allocator = '_<std::allocator<void>>'
pretty = pretty.replace(std_allocator, '')
# default_delete
std_defaultdelete = 'std::default_delete'
if std_defaultdelete in pretty:
dd_start = pretty.find(std_defaultdelete)
template_param_open = dd_start + len(std_defaultdelete)
# find index of matching/closing GT sign
template_param_close = template_param_open
level = 0
done = False
while not done:
template_param_close += 1
if pretty[template_param_close] == '<':
level += 1
elif pretty[template_param_close] == '>':
if level == 0:
done = True
else:
level -= 1
pretty = pretty[:dd_start] + pretty[(template_param_close + 1):]
# bind
std_bind = 'std::_Bind<'
if pretty.startswith(std_bind):
# remove bind<>
pretty = pretty.replace(std_bind, '')
pretty = pretty[:-1]
# remove placeholder stuff
placeholder_from = pretty.find('*')
placeholder_to = pretty.find(')', placeholder_from)
pretty = pretty[:placeholder_from] + '?' + pretty[(placeholder_to + 1):]
# remove dangling comma
pretty = pretty.replace(',>', '>')
# restore meaningful spaces
if pretty.startswith('void'):
pretty = 'void' + ' ' + pretty[len('void'):]
if pretty.endswith('const'):
pretty = pretty[:(len(pretty) - len('const'))] + ' ' + 'const'
return pretty
def get_callback_symbols(self) -> Mapping[int, str]:
"""
Get mappings between a callback object and its resolved symbol.
:return: the map
"""
callback_instances = self.data.callback_instances
callback_symbols = self.data.callback_symbols
# Get a list of callback objects
callback_objects = set(callback_instances['callback_object'])
# Get their symbol
return {
obj: self._prettify(callback_symbols.loc[obj, 'symbol']) for obj in callback_objects
}
def get_tids(self) -> List[str]:
"""Get a list of thread ids corresponding to the nodes."""
return self.data.nodes['tid'].unique().tolist()
def get_rcl_publish_instances(self, topic_name) -> Optional[DataFrame]:
"""
Get rcl publish instances for all publishers with the given topic name.
:param topic_name: the topic name
:return: dataframe with [publisher handle, publish timestamp, message] columns,
or `None` if topic name not found
"""
# We could have more than one publisher for the topic
publisher_handles = self.data.rcl_publishers.loc[
self.data.rcl_publishers['topic_name'] == topic_name
].index.values.astype(int)
if len(publisher_handles) == 0:
return None
publish_instances = self.data.rcl_publish_instances.loc[
self.data.rcl_publish_instances['publisher_handle'].isin(publisher_handles)
]
publish_instances.reset_index(drop=True, inplace=True)
self.convert_time_columns(publish_instances, [], ['timestamp'], True)
return publish_instances
def get_publish_instances(self) -> DataFrame:
"""
Get all publish instances (rclcpp, rcl, rmw) in a single dataframe.
The rows are ordered by publish timestamp, so the order will usually be: rclcpp, rcl, rmw.
However, this does not apply to publications from internal publishers, i.e.,
publications that originate from below rclcpp (rcl or rmw).
TODO(christophebedard) find heuristic to exclude those?
:return: dataframe with [timestamp, message, layer 'rclcpp'|'rcl'|'rmw', publisher handle]
columns, ordered by timestamp,
and where the publisher handle is only set (non-zero) for 'rcl' publish instances
"""
# Add publisher handle columns with zeros for dataframes that do not have this column,
# otherwise NaN is used and the publisher handle values for rcl are converted to float
rclcpp_instances = self.data.rclcpp_publish_instances.copy()
rclcpp_instances['layer'] = 'rclcpp'
rclcpp_instances['publisher_handle'] = 0
rcl_instances = self.data.rcl_publish_instances.copy()
rcl_instances['layer'] = 'rcl'
rmw_instances = self.data.rmw_publish_instances.copy()
rmw_instances['layer'] = 'rmw'
rmw_instances['publisher_handle'] = 0
publish_instances =
|
concat([rclcpp_instances, rcl_instances, rmw_instances], axis=0)
|
pandas.concat
|
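# --- Editorial aside -----------------------------------------------------------------
# Quick illustrative continuation (an assumption, not the upstream implementation): the
# docstring says rows are ordered by publish timestamp, which with plain pandas would be
# a sort_values after the concat shown in the completion above.
import pandas as pd

rclcpp = pd.DataFrame({"timestamp": [10, 40], "layer": "rclcpp", "publisher_handle": 0})
rcl = pd.DataFrame({"timestamp": [11, 41], "layer": "rcl", "publisher_handle": [7, 7]})
rmw = pd.DataFrame({"timestamp": [12, 42], "layer": "rmw", "publisher_handle": 0})
ordered = (
    pd.concat([rclcpp, rcl, rmw], axis=0)
    .sort_values(by="timestamp")
    .reset_index(drop=True)
)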
# -*- coding: utf-8 -*-
def excel_to_frame(path,sheets=[],header=1,index_col=2,product_name_col=0,data_breaks=1,header_info=0,index_col_info=0,sheet_info=u'产品信息'):
import pandas as pd
import numpy as np
if not sheets:
import openpyxl
wb=openpyxl.load_workbook(path)
sheets=wb.sheetnames
sheets.remove(sheet_info)
product_data={}
for x in sheets:
sheet_data=pd.read_excel(path,sheet_name=x,header=header,index_col=index_col)
data_rows=len(sheet_data.index.tolist())
data_cols=1
while sheet_data.columns[index_col+data_cols+data_breaks]!=(sheet_data.columns[index_col]+'.1'):
data_cols +=1
product_columns=pd.read_excel(path,sheet_name=x,header=header-1).iloc[0,index_col+1:index_col+data_cols].tolist()
product_names=pd.read_excel(path,sheet_name=x,header=None).iloc[:,product_name_col].dropna()
product_id=0
for x in product_names:
product_DataFrame=sheet_data.iloc[0:data_rows,(data_cols+data_breaks)*product_id+index_col:(data_cols+data_breaks)*product_id+index_col+data_cols-1]
product_DataFrame.columns=product_columns
product_id+=1
product_data[x] = product_DataFrame
product_info=pd.read_excel(path,sheet_name=sheet_info,header=header_info,index_col=index_col_info)
return product_data,product_info
def excel1_to_frame(path,sheet_info=u'产品信息',sheet_data=u'数据',index_name=u'日期',data_name=u'产品'):
import pandas as pd
info=pd.read_excel(path,sheet_name=sheet_info,index_col=0)
info_name=info[data_name]
data_sheet=
|
pd.read_excel(path,sheet_name=sheet_data,header=0,index_col=[0,1])
|
pandas.read_excel
|
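# --- Editorial aside -----------------------------------------------------------------
# Toy illustration (assumed layout, not the real workbook): excel_to_frame's iloc
# arithmetic slices a wide sheet in which every product occupies a fixed-width block of
# columns separated by `data_breaks` spacer columns.
import pandas as pd

wide = pd.DataFrame([[1, 2, None, 3, 4]], columns=["open", "close", "gap", "open.1", "close.1"])
data_cols, data_breaks = 2, 1
blocks = {
    f"product_{i}": wide.iloc[:, i * (data_cols + data_breaks): i * (data_cols + data_breaks) + data_cols]
    for i in range(2)
}
# blocks['product_0'] holds columns ['open', 'close']; blocks['product_1'] holds ['open.1', 'close.1'].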
from scipy.sparse import issparse, isspmatrix
import numpy as np
import pandas as pd
from multiprocessing.dummy import Pool as ThreadPool
import itertools
from tqdm import tqdm
from anndata import AnnData
from typing import Union
from .utils import normalize_data, TF_link_gene_chip
from ..tools.utils import flatten, einsum_correlation
def scribe(
adata: AnnData,
genes: Union[list, None] = None,
TFs: Union[list, None] = None,
Targets: Union[list, None] = None,
gene_filter_rate: float = 0.1,
cell_filter_UMI: int = 10000,
motif_ref: str = "https://www.dropbox.com/s/s8em539ojl55kgf/motifAnnotations_hgnc.csv?dl=1",
nt_layers: list = ["X_new", "X_total"],
normalize: bool = False,
do_CLR: bool = True,
drop_zero_cells: bool = True,
TF_link_ENCODE_ref: str = "https://www.dropbox.com/s/bjuope41pte7mf4/df_gene_TF_link_ENCODE.csv?dl=1",
) -> AnnData:
"""Apply Scribe to calculate causal network from spliced/unspliced, metabolic labeling based and other "real" time
series datasets. Note that this function can be applied to both of the metabolic labeling based single-cell assays
with newly synthesized and total RNA as well as the regular single cell assays with both the unspliced and spliced
transcripts. Furthermore, you can also replace the either the new or unspliced RNA with dynamo estimated cell-wise
velocity, transcription, splicing and degradation rates for each gene (similarly, replacing the expression values
of transcription factors with RNA binding, ribosome, epigenetics or epitranscriptomic factors, etc.) to infer the
total regulatory effects, transcription, splicing and post-transcriptional regulation of different factors.
Parameters
----------
adata: :class:`~anndata.AnnData`.
adata object that includes both newly synthesized and total gene expression of cells. Alternatively,
the object should include both unspliced and spliced gene expression of cells.
genes:
The list of gene names that will be used for causal network inference. By default, it is `None` and thus
will use all genes.
TFs:
The list of transcription factors that will be used for causal network inference. When it is `None`, the gene
list included in the file linked by `motif_ref` will be used.
Targets:
The list of target genes that will be used for causal network inference. When it is `None`, the gene list not
included in the file linked by `motif_ref` will be used.
gene_filter_rate:
minimum percentage of expressed cells for gene filtering.
cell_filter_UMI:
minimum number of UMIs for cell filtering.
motif_ref:
It provides the list of TFs gene names and is used to parse the data to get the list of TFs and Targets
for the causal network inference from those TFs to Targets. But currently the motif based filtering is not
implemented. By default it is a dropbox link that stores the data from us. Other motif references can be
downloaded from RcisTarget: https://resources.aertslab.org/cistarget/. For human motif matrix, it can be
downloaded from June's shared folder:
https://shendure-web.gs.washington.edu/content/members/cao1025/public/nobackup/sci_fate/data/hg19-tss-
centered-10kb-7species.mc9nr.feather
nt_layers:
The two keys for layers that will be used for the network inference. Note that the layers can be changed
flexibly. See the description of this function above. The first key corresponds to the transcriptome of the
next time point, for example unspliced RNAs (or estimated velocitym, see Fig 6 of the Scribe preprint:
https://www.biorxiv.org/content/10.1101/426981v1) from RNA velocity, new RNA from scSLAM-seq data, etc.
The second key corresponds to the transcriptome of the initial time point, for example spliced RNAs from RNA
velocity, old RNA from scSLAM-seq data.
drop_zero_cells:
Whether to drop cells that with zero expression for either the potential regulator or potential target. This
can signify the relationship between potential regulators and targets, speed up the calculation, but at the
risk of ignoring strong inhibition effects from certain regulators to targets.
do_CLR:
Whether to perform context likelihood relatedness analysis on the reconstructed causal network
TF_link_ENCODE_ref:
The path to the TF chip-seq data. By default it is a dropbox link from us that stores the data. Other data
can be downloaded from: https://amp.pharm.mssm.edu/Harmonizome/dataset/ENCODE+Transcription+Factor+Targets.
Returns
-------
An updated adata object with a new key `causal_net` in .uns attribute, which stores the inferred causal network.
"""
try:
from Scribe.Scribe import causal_net_dynamics_coupling, CLR
except ImportError:
raise ImportError(
"You need to install the package `Scribe`."
"Plelease install from https://github.com/aristoteleo/Scribe-py."
"Also check our paper: "
"https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
)
# detect format of the gene name:
str_format = (
"upper"
if adata.var_names[0].isupper()
else "lower"
if adata.var_names[0].islower()
else "title"
if adata.var_names[0].istitle()
else "other"
)
motifAnnotations_hgnc = pd.read_csv(motif_ref, sep="\t")
TF_list = motifAnnotations_hgnc.loc[:, "TF"].values
if str_format == "title":
TF_list = [i.capitalize() for i in TF_list]
elif str_format == "lower":
TF_list = [i.lower() for i in TF_list]
n_obs, n_var = adata.n_obs, adata.n_vars
# generate the expression matrix for downstream analysis
if nt_layers[1] == "old" and "old" not in adata.layers.keys():
adata.layers["old"] = (
adata.layers["total"] - adata.layers["new"]
if "velocity" not in adata.layers.keys()
else adata.layers["total"] - adata.layers["velocity"]
)
# filter genes
print(f"Original gene number: {n_var}")
gene_filter_new = (adata.layers[nt_layers[0]] > 0).sum(0) > (gene_filter_rate * n_obs)
gene_filter_tot = (adata.layers[nt_layers[1]] > 0).sum(0) > (gene_filter_rate * n_obs)
if issparse(adata.layers[nt_layers[0]]):
gene_filter_new = gene_filter_new.A1
if issparse(adata.layers[nt_layers[1]]):
gene_filter_tot = gene_filter_tot.A1
adata = adata[:, gene_filter_new * gene_filter_tot]
print(f"Gene number after filtering: {sum(gene_filter_new * gene_filter_tot)}")
# filter cells
print(f"Original cell number: {n_obs}")
cell_filter = adata.layers[nt_layers[1]].sum(1) > cell_filter_UMI
if issparse(adata.layers[nt_layers[1]]):
cell_filter = cell_filter.A1
adata = adata[cell_filter, :]
if adata.n_obs == 0:
raise Exception("No cells remaining after filtering, try relaxing `cell_filtering_UMI`.")
print(f"Cell number after filtering: {adata.n_obs}")
new = adata.layers[nt_layers[0]]
total = adata.layers[nt_layers[1]]
if normalize:
# recalculate size factor
from ..preprocessing import szFactor
adata = szFactor(
adata,
method="mean-geometric-mean-total",
round_exprs=True,
total_layers=["total"],
)
szfactors = adata.obs["Size_Factor"][:, None]
# normalize data (size factor correction, log transform and the scaling)
adata.layers[nt_layers[0]] = normalize_data(new, szfactors, pseudo_expr=0.1)
adata.layers[nt_layers[1]] = normalize_data(total, szfactors, pseudo_expr=0.1)
TFs = adata.var_names[adata.var.index.isin(TF_list)].to_list() if TFs is None else np.unique(TFs)
Targets = adata.var_names.difference(TFs).to_list() if Targets is None else np.unique(Targets)
if genes is not None:
TFs = list(set(genes).intersection(TFs))
Targets = list(set(genes).intersection(Targets))
if len(TFs) == 0 or len(Targets) == 0:
raise Exception(
"The TFs or Targets are empty! Something (input TFs/Targets list, gene_filter_rate, etc.) is wrong."
)
print(f"Potential TFs are: {len(TFs)}")
print(f"Potential Targets are: {len(Targets)}")
causal_net_dynamics_coupling(
adata,
TFs,
Targets,
t0_key=nt_layers[1],
t1_key=nt_layers[0],
normalize=False,
drop_zero_cells=drop_zero_cells,
)
res_dict = {"RDI": adata.uns["causal_net"]["RDI"]}
if do_CLR:
res_dict.update({"CLR": CLR(res_dict["RDI"])})
if TF_link_ENCODE_ref is not None:
df_gene_TF_link_ENCODE = pd.read_csv(TF_link_ENCODE_ref, sep="\t")
df_gene_TF_link_ENCODE["id_gene"] = (
df_gene_TF_link_ENCODE["id"].astype("str") + "_" + df_gene_TF_link_ENCODE["linked_gene_name"].astype("str")
)
df_gene = pd.DataFrame(adata.var.index, index=adata.var.index)
df_gene.columns = ["linked_gene"]
net = res_dict[list(res_dict.keys())[-1]]
# `pd.melt` has no `id_names` argument; rename the index column to "id" first so the
# downstream `net_var["id"]` access works.
net = net.reset_index().rename(columns={"index": "id"}).melt(
id_vars="id",
var_name="linked_gene",
value_name="corcoef",
)
net_var = net.merge(df_gene)
net_var["id_gene"] = net_var["id"].astype("str") + "_" + net_var["linked_gene_name"].astype("str")
filtered = TF_link_gene_chip(net_var, df_gene_TF_link_ENCODE, adata.var, cor_thresh=0.02)
res_dict.update({"filtered": filtered})
adata.uns["causal_net"] = res_dict
return adata
def coexp_measure(adata, genes, layer_x, layer_y, cores=1, skip_mi=True):
"""Calculate co-expression measures, including mutual information (MI), pearson correlation, etc. of genes between
two different layers.
Parameters
----------
adata: :class:`~anndata.AnnData`.
adata object that will be used for mutual information calculation.
genes: `List` (default: None)
Gene names from the adata object that will be used for mutual information calculation.
layer_x: `str`
The first key of the layer from the adata object that will be used for mutual information calculation.
layer_y: `str`
The second key of the layer from the adata object that will be used for mutual information calculation.
cores: `int` (default: 1)
Number of cores to run the MI calculation. If cores is set to be > 1, multiprocessing will be used to
parallel the calculation. `cores` is only applicable to MI calculation.
skip_mi: `bool` (default: `True`)
Whether to skip the mutual information calculation step which is time-consuming.
Returns
-------
An updated adata object that updated with a new columns (`mi`, `pearson`) in .var contains the mutual
information of input genes.
"""
try:
from Scribe.information_estimators import mi
except ImportError:
raise ImportError(
"You need to install the package `Scribe`."
"Plelease install from https://github.com/aristoteleo/Scribe-py."
"Also check our paper: "
"https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
)
adata.var["mi"], adata.var["pearson"] = np.nan, np.nan
if not skip_mi:
mi_vec = np.zeros(len(genes))
pearson = np.zeros(len(genes))
X, Y = adata[:, genes].layers[layer_x].T, adata[:, genes].layers[layer_y].T
X, Y = X.A if issparse(X) else X, Y.A if issparse(Y) else Y
k = min(5, int(adata.n_obs / 5 + 1))
for i in tqdm(
range(len(genes)),
desc=f"calculating mutual information between {layer_x} and {layer_y} data",
):
x, y = X[i], Y[i]
mask = np.logical_and(np.isfinite(x), np.isfinite(y))
pearson[i] = einsum_correlation(x[None, mask], y[mask], type="pearson")
x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]
if not skip_mi:
if cores == 1:
mi_vec[i] = mi(x, y, k=k)
if cores != 1:
if not skip_mi:
def pool_mi(x, y, k):
mask = np.logical_and(np.isfinite(x), np.isfinite(y))
x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]
return mi(x, y, k)
pool = ThreadPool(cores)
res = pool.starmap(pool_mi, zip(X, Y, itertools.repeat(k)))
pool.close()
pool.join()
mi_vec = np.array(res)
if not skip_mi:
adata.var.loc[genes, "mi"] = mi_vec
adata.var.loc[genes, "pearson"] = pearson
def coexp_measure_mat(
adata,
TFs=None,
Targets=None,
guide_keys=None,
t0_key="spliced",
t1_key="velocity",
normalize=True,
drop_zero_cells=True,
skip_mi=True,
cores=1,
copy=False,
):
"""Infer causal networks with dynamics-coupled single cells measurements.
Network inference is a insanely challenging problem which has a long history and that none of the existing
algorithms work well. However, it's quite possible that one or more of the algorithms could work if only they were
given enough data. Single-cell RNA-seq is exciting because it provides a ton of data. Somewhat surprisingly, just
having a lot of single-cell RNA-seq data won't make causal inference work well. We need a fundamentally better type
of measurement that couples information across cells and across time points. Experimental improvements are coming
now, and whether they are sufficient to power methods like Scribe is important future work. For example, the recent
developed computational algorithm (La Manno et al. 2018) estimates the levels of new (unspliced) versus mature
(spliced) transcripts from single-cell RNA-seq data for free. Moreover, exciting experimental approaches, like
single cell SLAM-seq methods (Hendriks et al. 2018; Erhard et al. 2019; Cao, Zhou, et al. 2019) are recently
developed that measures the transcriptome of two time points of the same cells. Datasets generated from those
methods will provide improvements of causal network inference as we comprehensively demonstrated from the manuscript
. This function take advantages of those datasets to infer the causal networks. We note that those technological
advance may be still not sufficient, radically different methods, for example something like highly multiplexed live
imaging that can record many genes may be needed.
Arguments
---------
adata: `anndata`
Annotated data matrix.
TFs: `List` or `None` (default: None)
The list of transcription factors that will be used for casual network inference.
Targets: `List` or `None` (default: None)
The list of target genes that will be used for casual network inference.
guide_keys: `List` (default: None)
The key of the CRISPR-guides, stored as a column in the .obs attribute. This argument is useful
for identifying the knockout or knockin genes for a perturb-seq experiment. Currently not used.
t0_key: `str` (default: spliced)
Key corresponds to the transcriptome of the initial time point, for example spliced RNAs from RNA velocity, old
RNA from scSLAM-seq data.
t1_key: `str` (default: velocity)
Key corresponds to the transcriptome of the next time point, for example unspliced RNAs (or estimated velocity,
see Fig 6 of the Scribe preprint) from RNA velocity, old RNA from scSLAM-seq data.
normalize: `bool`
Whether to scale the expression or velocity values into 0 to 1 before calculating causal networks.
drop_zero_cells: `bool` (Default: True)
Whether to drop cells that with zero expression for either the potential regulator or potential target. This
can signify the relationship between potential regulators and targets, speed up the calculation, but at the risk
of ignoring strong inhibition effects from certain regulators to targets.
copy: `bool`
Whether to return a copy of the adata or just update adata in place.
Returns
---------
An update AnnData object with inferred causal network stored as a matrix related to the key `causal_net` in the
`uns` slot.
"""
try:
from Scribe.information_estimators import mi
except ImportError:
raise ImportError(
"You need to install the package `Scribe`."
"Please install from https://github.com/aristoteleo/Scribe-py."
"Also check our paper: "
"https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
)
if TFs is None:
TFs = adata.var_names.tolist()
else:
TFs = adata.var_names.intersection(TFs).tolist()
if len(TFs) == 0:
raise Exception(
"The adata object has no gene names from .var_name that intersects with the TFs list you provided"
)
if Targets is None:
Targets = adata.var_names.tolist()
else:
Targets = adata.var_names.intersection(Targets).tolist()
if len(Targets) == 0:
raise Exception(
"The adata object has no gene names from .var_name that intersect with the Targets list you provided"
)
if guide_keys is not None:
guides = np.unique(adata.obs[guide_keys].tolist())
guides = np.setdiff1d(guides, ["*", "nan", "neg"])
idx_var = [vn in guides for vn in adata.var_names]
idx_var = np.argwhere(idx_var)
guides = adata.var_names.values[idx_var.flatten()].tolist()
# support sparse matrix:
genes = TFs + Targets
genes = np.unique(genes)
t0_df = (
pd.DataFrame(adata[:, genes].layers[t0_key].todense(), index=adata.obs_names, columns=genes)
if isspmatrix(adata.layers[t0_key])
else pd.DataFrame(adata[:, genes].layers[t0_key], index=adata.obs_names, columns=genes)
)
t1_df = (
pd.DataFrame(adata[:, genes].layers[t1_key].todense(), index=adata.obs_names, columns=genes)
if isspmatrix(adata.layers[t1_key])
else
|
pd.DataFrame(adata[:, genes].layers[t1_key], index=adata.obs_names, columns=genes)
|
pandas.DataFrame
|
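# --- Editorial aside -----------------------------------------------------------------
# Not from the source: the `do_CLR` option above post-processes the inferred matrix with
# context likelihood of relatedness. The code calls the Scribe package's own CLR(); a
# compact NumPy sketch of the usual CLR z-scoring, for intuition only (net is a 2-D array):
import numpy as np

def clr_sketch(net):
    z_rows = np.maximum(0.0, (net - net.mean(axis=1, keepdims=True)) / net.std(axis=1, keepdims=True))
    z_cols = np.maximum(0.0, (net - net.mean(axis=0, keepdims=True)) / net.std(axis=0, keepdims=True))
    return np.sqrt(z_rows ** 2 + z_cols ** 2)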
import unittest
from enda.timeseries import TimeSeries
import pandas as pd
import pytz
class TestTimeSeries(unittest.TestCase):
def test_collapse_dt_series_into_periods(self):
# periods is a list of (start, end) pairs.
periods = [
(pd.to_datetime('2018-01-01 00:15:00+01:00'), pd.to_datetime('2018-01-01 00:45:00+01:00')),
(pd.to_datetime('2018-01-01 10:15:00+01:00'), pd.to_datetime('2018-01-01 15:45:00+01:00')),
(pd.to_datetime('2018-01-01 20:15:00+01:00'), pd.to_datetime('2018-01-01 21:45:00+01:00')),
]
# expand periods to build a time-series with gaps
dti = pd.DatetimeIndex([])
for s, e in periods:
dti = dti.append(pd.date_range(s, e, freq="30min"))
self.assertEqual(2+12+4, dti.shape[0])
# now find periods in the time-series
# should work with 2 types of freq arguments
for freq in ["30min", pd.to_timedelta("30min")]:
computed_periods = TimeSeries.collapse_dt_series_into_periods(dti, freq)
self.assertEqual(len(computed_periods), len(periods))
for i in range(len(periods)):
self.assertEqual(computed_periods[i][0], periods[i][0])
self.assertEqual(computed_periods[i][1], periods[i][1])
def test_collapse_dt_series_into_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:15:00+01:00'),
|
pd.to_datetime('2018-01-01 00:45:00+01:00')
|
pandas.to_datetime
|
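# --- Editorial aside -----------------------------------------------------------------
# Side sketch (an assumption, not enda's implementation): the behaviour under test —
# collapsing a regular DatetimeIndex with gaps into (start, end) periods — can be
# reproduced with a plain pandas diff/cumsum grouping.
import pandas as pd

dti = pd.date_range("2018-01-01 00:15", "2018-01-01 00:45", freq="30min").append(
    pd.date_range("2018-01-01 10:15", "2018-01-01 15:45", freq="30min"))
s = pd.Series(dti)
groups = (s.diff() != pd.Timedelta("30min")).cumsum()
periods = [(g.iloc[0], g.iloc[-1]) for _, g in s.groupby(groups)]
# -> [(00:15, 00:45), (10:15, 15:45)]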
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Home Broker API - Market data downloader
# https://github.com/crapher/pyhomebroker.git
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from datetime import datetime
from .exceptions import DataException
__settlements_int = {
'1': 'spot',
'2': '24hs',
'3': '48hs'}
__settlements_str = {
'spot': '1',
'24hs': '2',
'48hs': '3'}
__callput = {
0: '',
1: 'CALL',
2: 'PUT'}
############################
## PROCESS JSON DOCUMENTS ##
############################
def convert_to_numeric_columns(df, columns):
for col in columns:
df[col] = df[col].apply(lambda x: x.replace('.', '').replace(',','.') if isinstance(x, str) else x)
df[col] = pd.to_numeric(df[col].apply(lambda x: np.nan if x == '-' else x))
return df
def process_personal_portfolio(df):
result_index = ['symbol', 'settlement']
filter_columns = ['Symbol', 'Term', 'BuyQuantity', 'BuyPrice', 'SellPrice', 'SellQuantity', 'LastPrice', 'VariationRate', 'StartPrice', 'MaxPrice', 'MinPrice', 'PreviousClose', 'TotalAmountTraded', 'TotalQuantityTraded', 'Trades', 'TradeDate', 'MaturityDate', 'StrikePrice', 'PutOrCall']
result_columns = ['symbol', 'settlement', 'bidsize', 'bid', 'ask', 'asksize', 'last', 'change', 'open', 'high', 'low', 'previous_close', 'turnover', 'volume', 'operations', 'datetime', 'expiration', 'strike', 'kind']
numeric_columns = ['last', 'open', 'high', 'low', 'volume', 'turnover', 'operations', 'change', 'bidsize', 'bid', 'asksize', 'ask', 'previous_close', 'strike']
options_columns = ['MaturityDate','StrikePrice','PutOrCall']
if not df.empty:
df.TradeDate = pd.to_datetime(df.TradeDate, format='%Y%m%d', errors='coerce') +
|
pd.to_timedelta(df.Hour, errors='coerce')
|
pandas.to_timedelta
|
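# --- Editorial aside -----------------------------------------------------------------
# Worked example (illustrative values, not real board data) of what
# convert_to_numeric_columns does: locale-formatted '1.234,56' becomes 1234.56 and the
# placeholder '-' becomes NaN.
import numpy as np
import pandas as pd

col = pd.Series(["1.234,56", "-", "987,1"])
col = col.apply(lambda x: x.replace(".", "").replace(",", ".") if isinstance(x, str) else x)
col = pd.to_numeric(col.apply(lambda x: np.nan if x == "-" else x))
# -> [1234.56, NaN, 987.1]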
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd
import numpy as np
from tqdm import tqdm
def get_soup(my_url: str) -> BeautifulSoup:
my_src = requests.get(my_url).text
return BeautifulSoup(my_src, 'lxml')
def get_second_page(my_url: str) -> str:
soup = get_soup(my_url)
return soup.find_all(class_="andes-pagination__link ui-search-link")[0].get('href')
def set_number_page(my_url: str, n_page: int) -> str:
try:
regex = re.search(r'(.*)(_\d{2}_)(.*)', my_url)
return regex.group(1) + '_' + str(n_page) + '_' + regex.group(3)
except:
regex = re.search(r'(.*)(_\d{2})', my_url)
return regex.group(1) + '_' + str(n_page)
def get_results(my_url: str) -> int:
my_soup = get_soup(my_url)
regex = my_soup.find('span', class_="ui-search-search-result__quantity-results").text
rgx = re.search(r'(\d*).(\d*)', regex)
return int(rgx.group(1) + rgx.group(2))
def get_dolar_blue() -> float:
URL = 'https://www.dolarsi.com/api/api.php?type=valoresprincipales'
json = requests.get(URL).json()
blue = json[1]['casa']['venta']
blue = float(blue.replace(',', '.'))
return blue
def convert_usd(currency: float, price: str, blue: float) -> float:
price = float(price.replace('.', ''))
return price if currency == 'U$S' else price / blue
def get_bmw_model(title: str) -> pd.Series:
rgx = re.search(r'(\d{3})(i|d)', title)
try:
return pd.Series([rgx.group(1), rgx.group(2)])
except:
return pd.Series([np.nan, np.nan])
def get_brand(title):
try:
return pd.Series(title.split()[0])
except:
return pd.Series(np.nan)
def get_model(title):
try:
return pd.Series(title.split()[1])
except:
return pd.Series(np.nan)
def get_cars(ml_url: str, brand: str) -> pd.DataFrame:
n_results = get_results(ml_url)
base_url = get_second_page(ml_url)
cars = []
for i in tqdm(range(0, n_results, 49)):
# Get the url for this page of results
scrap_url = set_number_page(base_url, i)
# print(scrap_url)
# Get the content of the url, build the soup, and scrape the fields in order
my_soup = get_soup(scrap_url)
sections = my_soup.find_all('a', class_="ui-search-result__content ui-search-link")
for section in sections:
# Title
title = section.get('title')
# Year
yearkm = section.find('ul', class_="ui-search-card-attributes ui-search-item__group__element").get_text()
year = yearkm[:4]
# Km
m = re.search(r'(\d*).(\d*)', yearkm[4:])
km = m.group(1) + m.group(2)
# Currency
currency = section.find('span', class_="price-tag-symbol").text
# Price
price = section.find('span', class_="price-tag-fraction").text
# Location
location = section.find('span', class_="ui-search-item__group__element ui-search-item__location").text
# Link
link = section.get('href')
car = {'title': title,
'year': year,
'km': km,
'currency': currency,
'price': price,
'location': location,
'link': link}
cars.append(car)
df = pd.DataFrame(cars)
df.drop_duplicates(subset=['title', 'year', 'km', 'location'], inplace=True)
df.reset_index(inplace=True, drop=True)
# Get prices in dollars
blue = get_dolar_blue()
df['price'] = df.apply(lambda row: convert_usd(row['currency'], row['price'], blue), axis=1)
# Parse year, price, km, currency
pd.to_numeric(df['year'], errors='coerce')
pd.to_numeric(df['price'], errors='coerce')
|
pd.to_numeric(df['km'], errors='coerce')
|
pandas.to_numeric
|
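# --- Editorial aside -----------------------------------------------------------------
# Quick check (made-up title, not scraped data) of the get_bmw_model regex above: a
# '320i'-style fragment splits into displacement and fuel letter; anything else yields NaN.
import re

m = re.search(r'(\d{3})(i|d)', "BMW 320i Executive")
displacement, fuel = m.group(1), m.group(2)   # -> '320', 'i'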
"""
This file contains several helper functions to calculate sleep statistics from
a one-dimensional sleep staging vector (hypnogram).
"""
import numpy as np
import pandas as pd
__all__ = ['transition_matrix', 'sleep_statistics']
#############################################################################
# TRANSITION MATRIX
#############################################################################
def transition_matrix(hypno):
"""Create a state-transition matrix from an hypnogram.
.. versionadded:: 0.1.9
Parameters
----------
hypno : array_like
Hypnogram. The dtype of ``hypno`` must be integer
(e.g. [0, 2, 2, 1, 1, 1, ...]). The sampling frequency must be the
original one, i.e. 1 value per 30 seconds if the staging was done in
30 seconds epochs. Using an upsampled hypnogram will result in an
incorrect transition matrix.
For best results, we recommend using an hypnogram cropped to
either the time in bed (TIB) or the sleep period time (SPT).
Returns
-------
counts : array
Counts transition matrix (number of transitions from stage X to
stage Y).
probs : array
Conditional probability transition matrix, i.e.
given that current state is X, what is the probability that
the next state is Y.
``probs`` is a `right stochastic matrix
<https://en.wikipedia.org/wiki/Stochastic_matrix>`_,
i.e. each row sums to 1.
Examples
--------
>>> from yasa import transition_matrix
>>> a = [1, 1, 1, 0, 0, 2, 2, 0, 2, 0, 1, 1, 0, 0]
>>> counts, probs = transition_matrix(a)
>>> counts
0 1 2
Stage
0 2 1 2
1 2 3 0
2 2 0 1
>>> probs
0 1 2
Stage
0 0.400000 0.2 0.400000
1 0.400000 0.6 0.000000
2 0.666667 0.0 0.333333
We can plot the transition matrix using :py:func:`seaborn.heatmap`:
.. plot::
>>> import numpy as np
>>> import seaborn as sns
>>> import matplotlib.pyplot as plt
>>> from yasa import transition_matrix
>>> # Calculate probability matrix
>>> a = [1, 1, 1, 0, 0, 2, 2, 0, 2, 0, 1, 1, 0, 0]
>>> _, probs = transition_matrix(a)
>>> # Start the plot
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .1}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws,
... figsize=(5, 5))
>>> sns.heatmap(probs, ax=ax, square=False, vmin=0, vmax=1, cbar=True,
... cbar_ax=cbar_ax, cmap='YlOrRd', annot=True, fmt='.2f',
... cbar_kws={"orientation": "horizontal", "fraction": 0.1,
... "label": "Transition probability"})
>>> ax.set_xlabel("To sleep stage")
>>> ax.xaxis.tick_top()
>>> ax.set_ylabel("From sleep stage")
>>> ax.xaxis.set_label_position('top')
"""
x = np.asarray(hypno, dtype=int)
unique, inverse = np.unique(x, return_inverse=True)
n = unique.size
# Integer transition counts
counts = np.zeros((n, n), dtype=int)
np.add.at(counts, (inverse[:-1], inverse[1:]), 1)
# Conditional probabilities
probs = counts / counts.sum(axis=-1, keepdims=True)
# Optional, convert to Pandas
counts = pd.DataFrame(counts, index=unique, columns=unique)
probs =
|
pd.DataFrame(probs, index=unique, columns=unique)
|
pandas.DataFrame
|
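# --- Editorial aside -----------------------------------------------------------------
# Sanity-check sketch (mirrors the function body above on the docstring's example; it
# assumes the function goes on to return counts and probs, which is not shown here):
# every row of the conditional-probability matrix sums to 1.
import numpy as np

a = [1, 1, 1, 0, 0, 2, 2, 0, 2, 0, 1, 1, 0, 0]
x = np.asarray(a, dtype=int)
uniq, inv = np.unique(x, return_inverse=True)
counts = np.zeros((uniq.size, uniq.size), dtype=int)
np.add.at(counts, (inv[:-1], inv[1:]), 1)
probs = counts / counts.sum(axis=-1, keepdims=True)
assert np.allclose(probs.sum(axis=1), 1.0)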
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
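# Illustrative note (added by the editor, not part of the original test file): on a
# 2-D frame, _axify(df, [0, 1], 1) returns (slice(None), [0, 1]), i.e. "all rows,
# columns 0 and 1", which the helpers above then feed to .iloc / .ix.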
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexErrror!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dups indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
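# The unambiguous, non-chained spelling of the assignment above is a single
# .loc call (sketch only, reusing the same df/idx as this test):
#
#     df.loc[idx, 'test'] = df.loc[idx, 'a'].apply(
#         lambda x: '-----' if x == 'aaa' else x)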
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
|
tm.assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
# Copyright 2013-2021 The Salish Sea MEOPAR contributors
# and The University of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of Python functions to produce model residual calculations and
visualizations.
"""
import datetime
import io
import arrow
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
import pandas as pd
import pytz
import requests
from dateutil import tz
from salishsea_tools import geo_tools, stormtools, tidetools, nc_tools
from nowcast import analyze
from nowcast.figures import shared
# Module constants
paths = {
"nowcast": "/results/SalishSea/nowcast/",
"forecast": "/results/SalishSea/forecast/",
"forecast2": "/results/SalishSea/forecast2/",
"tides": "/data/nsoontie/MEOPAR/tools/SalishSeaNowcast/tidal_predictions/",
}
colours = {
"nowcast": "DodgerBlue",
"forecast": "ForestGreen",
"forecast2": "MediumVioletRed",
"observed": "Indigo",
"predicted": "ForestGreen",
"model": "blue",
"residual": "DimGray",
}
SITES = {
# Constant with station information: mean sea level, latitude,
# longitude, station number, historical extreme ssh, etc.
# Extreme ssh from DFO website
# Mean sea level from CHS tidal constituents.
# VENUS coordinates from the VENUS website. Depth is in meters.
"Nanaimo": {"lat": 49.16, "lon": -123.93, "msl": 3.08, "extreme_ssh": 5.47},
"Halibut Bank": {"lat": 49.34, "lon": -123.72},
"Dungeness": {"lat": 48.15, "lon": -123.117},
"La Perouse Bank": {"lat": 48.83, "lon": -126.0},
"<NAME>": {
"lat": 49.33,
"lon": -123.25,
"msl": 3.09,
"stn_no": 7795,
"extreme_ssh": 5.61,
},
"Victoria": {
"lat": 48.41,
"lon": -123.36,
"msl": 1.8810,
"stn_no": 7120,
"extreme_ssh": 3.76,
},
"<NAME>": {
"lat": 50.04,
"lon": -125.24,
"msl": 2.916,
"stn_no": 8074,
"extreme_ssh": 5.35,
},
"<NAME>": {"lat": 48.4, "lon": -124.6, "stn_no": 9443090},
"<NAME>": {"lat": 48.55, "lon": -123.016667, "stn_no": 9449880},
"<NAME>": {
"lat": 48.866667,
"lon": -122.766667,
"stn_no": 9449424,
"msl": 3.543,
"extreme_ssh": 5.846,
},
"SandHeads": {"lat": 49.10, "lon": -123.30},
"Tofino": {"lat": 49.15, "lon": -125.91, "stn_no": 8615},
"Bamfield": {"lat": 48.84, "lon": -125.14, "stn_no": 8545},
"VENUS": {
"East": {"lat": 49.0419, "lon": -123.3176, "depth": 170},
"Central": {"lat": 49.0401, "lon": -123.4261, "depth": 300},
},
}
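# Sketch of how SITES is read: top-level keys are station names, nested keys
# hold the metadata described above, and the VENUS entry is keyed one level
# deeper by node name. The _example_* names below are illustrative only.
_example_victoria_msl = SITES["Victoria"]["msl"]
_example_nanaimo_lat_lon = (SITES["Nanaimo"]["lat"], SITES["Nanaimo"]["lon"])
_example_venus_central_depth = SITES["VENUS"]["Central"]["depth"]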
# Module functions
def plot_residual_forcing(ax, runs_list, t_orig):
"""Plots the observed water level residual at Neah Bay against
forced residuals from existing ssh*.txt files for Neah Bay.
Function may produce none, any, or all (nowcast, forecast, forecast 2)
forced residuals depending on availability for specified date (runs_list).
:arg ax: The axis where the residuals are plotted.
:type ax: axis object
:arg runs_list: Runs that are verified as complete.
:type runs_list: list
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
# retrieve observations, tides and residual
tides = shared.get_tides("Neah Bay", path=paths["tides"])
res_obs, obs = obs_residual_ssh_NOAA("Neah Bay", tides, sdt, sdt)
# truncate and plot
res_obs_trun, time_trun = analyze.truncate_data(
np.array(res_obs), np.array(obs.time), sdt, edt
)
ax.plot(time_trun, res_obs_trun, colours["observed"], label="observed", lw=2.5)
# plot forcing for each simulation
for mode in runs_list:
filename_NB, run_date = analyze.create_path(mode, t_orig, "ssh*.txt")
if filename_NB:
dates, surge, fflag = NeahBay_forcing_anom(
filename_NB, run_date, paths["tides"]
)
surge_t, dates_t = analyze.truncate_data(
np.array(surge), np.array(dates), sdt, edt
)
ax.plot(dates_t, surge_t, label=mode, lw=2.5, color=colours[mode])
ax.set_title(
"Comparison of observed and forced sea surface"
" height residuals at Neah Bay: "
"{t_forcing:%d-%b-%Y}".format(t_forcing=t_orig)
)
def plot_residual_model(axs, names, runs_list, grid_B, t_orig):
"""Plots the observed sea surface height residual against the
sea surface height model residual (calculate_residual) at
specified stations. Function may produce none, any, or all
(nowcast, forecast, forecast 2) model residuals depending on
availability for specified date (runs_list).
:arg axs: The axes where the residuals are plotted.
:type axs: list of axes
:arg names: Names of station.
:type names: list of names
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
bathy, X, Y = tidetools.get_bathy_data(grid_B)
t_orig_obs = t_orig + datetime.timedelta(days=-1)
t_final_obs = t_orig + datetime.timedelta(days=1)
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
for ax, name in zip(axs, names):
# Identify model grid point
lat = SITES[name]["lat"]
lon = SITES[name]["lon"]
j, i = geo_tools.find_closest_model_point(lon, lat, X, Y, land_mask=bathy.mask)
# Observed residuals and wlevs and tides
ttide = shared.get_tides(name, path=paths["tides"])
res_obs, wlev_meas = obs_residual_ssh(name, ttide, t_orig_obs, t_final_obs)
# truncate and plot
res_obs_trun, time_obs_trun = analyze.truncate_data(
np.array(res_obs), np.array(wlev_meas.time), sdt, edt
)
ax.plot(
time_obs_trun, res_obs_trun, c=colours["observed"], lw=2.5, label="observed"
)
for mode in runs_list:
filename, run_date = analyze.create_path(
mode, t_orig, "SalishSea_1h_*_grid_T.nc"
)
grid_T = nc.Dataset(filename)
res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(
grid_T, j, i, ttide
)
# truncate and plot
res_mod_trun, t_mod_trun = analyze.truncate_data(res_mod, t_model, sdt, edt)
ax.plot(t_mod_trun, res_mod_trun, label=mode, c=colours[mode], lw=2.5)
ax.set_title(
"Comparison of modelled sea surface height residuals at"
" {station}: {t:%d-%b-%Y}".format(station=name, t=t_orig)
)
def get_error_model(names, runs_list, grid_B, t_orig):
"""Sets up the calculation for the model residual error.
:arg names: Names of station.
:type names: list of strings
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
:returns: error_mod_dict, t_mod_dict
"""
bathy, X, Y = tidetools.get_bathy_data(grid_B)
t_orig_obs = t_orig + datetime.timedelta(days=-1)
t_final_obs = t_orig + datetime.timedelta(days=1)
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
error_mod_dict = {}
t_mod_dict = {}
for name in names:
error_mod_dict[name] = {}
t_mod_dict[name] = {}
# Look up model grid
lat = SITES[name]["lat"]
lon = SITES[name]["lon"]
j, i = geo_tools.find_closest_model_point(lon, lat, X, Y, land_mask=bathy.mask)
# Observed residuals and wlevs and tides
ttide = shared.get_tides(name, path=paths["tides"])
res_obs, wlev_meas = obs_residual_ssh(name, ttide, t_orig_obs, t_final_obs)
res_obs_trun, time_obs_trun = analyze.truncate_data(
np.array(res_obs), np.array(wlev_meas.time), sdt, edt
)
for mode in runs_list:
filename, run_date = analyze.create_path(
mode, t_orig, "SalishSea_1h_*_grid_T.nc"
)
grid_T = nc.Dataset(filename)
res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(
grid_T, j, i, ttide
)
# Truncate
res_mod_trun, t_mod_trun = analyze.truncate_data(res_mod, t_model, sdt, edt)
# Error
error_mod = analyze.calculate_error(
res_mod_trun, t_mod_trun, res_obs_trun, time_obs_trun
)
error_mod_dict[name][mode] = error_mod
t_mod_dict[name][mode] = t_mod_trun
return error_mod_dict, t_mod_dict
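# Sketch of how the nested dicts returned by get_error_model are keyed:
# station name first, then run mode. The station, mode, date, and an already
# opened bathymetry dataset grid_B are assumptions for illustration only.
def _sketch_error_model_keys(grid_B):
    t_orig = datetime.datetime(2015, 1, 5)
    error_mod_dict, t_mod_dict = get_error_model(
        ["Victoria"], ["nowcast"], grid_B, t_orig)
    # error time series for one station/mode pair and the model times it is
    # aligned with
    return error_mod_dict["Victoria"]["nowcast"], t_mod_dict["Victoria"]["nowcast"]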
def get_error_forcing(runs_list, t_orig):
"""Sets up the calculation for the forcing residual error.
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg t_orig: Date being considered.
:type t_orig: datetime object
:returns: error_frc_dict, t_frc_dict
"""
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
# retrieve observed residual
tides = shared.get_tides("Neah Bay", path=paths["tides"])
res_obs, obs = obs_residual_ssh_NOAA("Neah Bay", tides, sdt, sdt)
res_obs_trun, time_trun = analyze.truncate_data(
np.array(res_obs), np.array(obs.time), sdt, edt
)
# calculate forcing error
error_frc_dict = {}
t_frc_dict = {}
for mode in runs_list:
filename_NB, run_date = analyze.create_path(mode, t_orig, "ssh*.txt")
if filename_NB:
dates, surge, fflag = NeahBay_forcing_anom(
filename_NB, run_date, paths["tides"]
)
surge_t, dates_t = analyze.truncate_data(
np.array(surge), np.array(dates), sdt, edt
)
error_frc = analyze.calculate_error(
surge_t, dates_t, res_obs_trun, obs.time
)
error_frc_dict[mode] = error_frc
t_frc_dict[mode] = dates_t
return error_frc_dict, t_frc_dict
def plot_error_model(axs, names, runs_list, grid_B, t_orig):
"""Plots the model residual error.
:arg axs: The axis where the residual errors are plotted.
:type axs: list of axes
:arg names: Names of station.
:type names: list of strings
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list of strings
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
error_mod_dict, t_mod_dict = get_error_model(names, runs_list, grid_B, t_orig)
for ax, name in zip(axs, names):
ax.set_title(
"Comparison of modelled residual errors at {station}:"
" {t:%d-%b-%Y}".format(station=name, t=t_orig)
)
for mode in runs_list:
ax.plot(
t_mod_dict[name][mode],
error_mod_dict[name][mode],
label=mode,
c=colours[mode],
lw=2.5,
)
def plot_error_forcing(ax, runs_list, t_orig):
"""Plots the forcing residual error.
:arg ax: The axis where the residual errors are plotted.
:type ax: axis object
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
error_frc_dict, t_frc_dict = get_error_forcing(runs_list, t_orig)
for mode in runs_list:
ax.plot(
t_frc_dict[mode], error_frc_dict[mode], label=mode, c=colours[mode], lw=2.5
)
ax.set_title(
"Comparison of observed and forced residual errors at "
"Neah Bay: {t_forcing:%d-%b-%Y}".format(t_forcing=t_orig)
)
def plot_residual_error_all(subject, grid_B, t_orig, figsize=(20, 16)):
"""Sets up and combines the plots produced by plot_residual_forcing
and plot_residual_model or plot_error_forcing and plot_error_model.
This function specifies the stations for which the nested functions
apply. Figure formatting, except for x-axis limits and titles, is included.
:arg subject: Subject of figure, either 'residual' or 'error'.
:type subject: string
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
:arg figsize: Figure size (width, height) in inches.
:type figsize: 2-tuple
:returns: fig
"""
# set up axis limits - based on full 24 hour period 0000 to 2400
sax = t_orig
eax = t_orig + datetime.timedelta(days=1)
runs_list = analyze.verified_runs(t_orig)
fig, axes = plt.subplots(4, 1, figsize=figsize)
axs_mod = [axes[1], axes[2], axes[3]]
names = ["<NAME>", "Victoria", "<NAME>"]
if subject == "residual":
plot_residual_forcing(axes[0], runs_list, t_orig)
plot_residual_model(axs_mod, names, runs_list, grid_B, t_orig)
elif subject == "error":
plot_error_forcing(axes[0], runs_list, t_orig)
plot_error_model(axs_mod, names, runs_list, grid_B, t_orig)
for ax in axes:
ax.set_ylim([-0.4, 0.4])
ax.set_xlabel("[hrs UTC]")
ax.set_ylabel("[m]")
hfmt = mdates.DateFormatter("%m/%d %H:%M")
ax.xaxis.set_major_formatter(hfmt)
ax.legend(loc=2, ncol=4)
ax.grid()
ax.set_xlim([sax, eax])
return fig
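# Sketch of driving the combined figure for one run day. The bathymetry file
# path argument and the date below are assumptions for illustration only.
def _sketch_residual_error_figure(bathy_path):
    grid_B = nc.Dataset(bathy_path)
    t_orig = datetime.datetime(2015, 1, 5)
    # 'error' compares model/forcing errors; 'residual' plots the residuals
    return plot_residual_error_all("error", grid_B, t_orig)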
def combine_errors(name, mode, dates, grid_B):
"""Combine model and forcing errors for a simulation mode over several days.
returns time series of both model and forcing error and daily means.
Treats each simulation over 24 hours.
:arg name: name of station for model calculation
:type name: string, example 'Point Atkinson', 'Victoria'
:arg mode: simulation mode: nowcast, forecast, or forecast2
:type mode: string
:arg dates: list of dates to combine
:type dates: list of datetime objects
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:returns: force, model, time, daily_time.
model and force are dictionaries with keys 'error' and 'daily'.
Each key corresponds to array of error time series and daily means.
time is an array of times corresponding to error calculations
daily_time is an array of times corresponding to daily means
"""
model = {"error": np.array([]), "daily": np.array([])}
force = {"error": np.array([]), "daily": np.array([])}
time = np.array([])
daily_time = np.array([])
for t_sim in dates:
# check if the run happened
if mode in analyze.verified_runs(t_sim):
# retrieve forcing and model error
e_frc_tmp, t_frc_tmp = get_error_forcing([mode], t_sim)
e_mod_tmp, t_mod_tmp = get_error_model([name], [mode], grid_B, t_sim)
e_frc_tmp = shared.interp_to_model_time(
t_mod_tmp[name][mode], e_frc_tmp[mode], t_frc_tmp[mode]
)
# append to larger array
force["error"] = np.append(force["error"], e_frc_tmp)
model["error"] = np.append(model["error"], e_mod_tmp[name][mode])
time = np.append(time, t_mod_tmp[name][mode])
# append daily mean error
force["daily"] = np.append(force["daily"], np.nanmean(e_frc_tmp))
model["daily"] = np.append(
model["daily"], np.nanmean(e_mod_tmp[name][mode])
)
daily_time = np.append(daily_time, t_sim + datetime.timedelta(hours=12))
else:
print("{} simulation for {} did not occur".format(mode, t_sim))
return force, model, time, daily_time
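# Sketch of how the combine_errors outputs line up: the 'error' arrays are
# aligned with time, and the 'daily' arrays are aligned with daily_time (one
# mean per simulated day). Station, mode, dates, and grid_B are assumptions
# for illustration only.
def _sketch_combined_errors(grid_B):
    dates = [datetime.datetime(2015, 1, d) for d in range(5, 8)]
    force, model, time, daily_time = combine_errors(
        "Victoria", "nowcast", dates, grid_B)
    return np.nanmean(model["error"]), list(zip(daily_time, model["daily"]))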
def compare_errors(name, mode, start, end, grid_B, figsize=(20, 12)):
"""Compares the model and forcing error at a station
between dates start and end for a simulation mode."""
# array of dates for iteration
numdays = (end - start).days
dates = [start + datetime.timedelta(days=num) for num in range(0, numdays + 1)]
dates.sort()
# initialize figure and arrays
fig, axs = plt.subplots(3, 1, figsize=figsize)
force, model, time, daily_time = combine_errors(name, mode, dates, grid_B)
ttide = shared.get_tides(name, path=paths["tides"])
# Plotting time series
ax = axs[0]
ax.plot(time, force["error"], "b", label="Forcing error", lw=2)
ax.plot(time, model["error"], "g", lw=2, label="Model error")
ax.set_title("Comparison of {mode} error at" " {name}".format(mode=mode, name=name))
ax.set_ylim([-0.4, 0.4])
hfmt = mdates.DateFormatter("%m/%d %H:%M")
# Plotting daily mean
ax = axs[1]
ax.plot(daily_time, force["daily"], "b", label="Forcing daily mean error", lw=2)
ax.plot(
[time[0], time[-1]],
[np.nanmean(force["error"]), np.nanmean(force["error"])],
"--b",
label="Mean forcing error",
lw=2,
)
ax.plot(daily_time, model["daily"], "g", lw=2, label="Model daily mean error")
ax.plot(
[time[0], time[-1]],
[np.nanmean(model["error"]), np.nanmean(model["error"])],
"--g",
label="Mean model error",
lw=2,
)
ax.set_title(
"Comparison of {mode} daily mean error at"
" {name}".format(mode=mode, name=name)
)
ax.set_ylim([-0.4, 0.4])
# Plot tides
ax = axs[2]
ax.plot(ttide.time, ttide.pred_all, "k", lw=2, label="tides")
ax.set_title("Tidal predictions")
ax.set_ylim([-3, 3])
# format axes
hfmt = mdates.DateFormatter("%m/%d %H:%M")
for ax in axs:
ax.xaxis.set_major_formatter(hfmt)
ax.legend(loc=2, ncol=4)
ax.grid()
ax.set_xlim([start, end + datetime.timedelta(days=1)])
ax.set_ylabel("[m]")
return fig
def model_residual_ssh(grid_T, j, i, tides):
"""Calculates the model residual at coordinate j, i.
:arg grid_T: hourly model results file
:type grid_T: netCDF file
:arg j: model y-index
:type j: integer 0<=j<898
:arg i: model i-index
:type i: integer 0<=i<398
:arg tides: tidal predictions at grid point
:type tides: pandas DataFrame
:returns: res_mod, t_model, ssh_corr, ssh_mod
The model residual, model times, model corrected ssh, and
unmodified model ssh"""
ssh_mod = grid_T.variables["sossheig"][:, j, i]
t_s, t_f, t_model = get_model_time_variables(grid_T)
ssh_corr = shared.correct_model_ssh(ssh_mod, t_model, tides)
res_mod = compute_residual(ssh_corr, t_model, tides)
return res_mod, t_model, ssh_corr, ssh_mod
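# Sketch of computing a model residual for a single grid point. The results
# file path, station name, and default grid indices are assumptions for
# illustration only; indices must satisfy 0 <= j < 898 and 0 <= i < 398.
def _sketch_model_residual(results_path, j=430, i=290):
    grid_T = nc.Dataset(results_path)
    ttide = shared.get_tides("Victoria", path=paths["tides"])
    res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(grid_T, j, i, ttide)
    return res_mod, t_model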
def obs_residual_ssh(name, tides, sdt, edt):
"""Calculates the observed residual at Point Atkinson, Campbell River,
or Victoria.
:arg name: Name of station.
:type name: string
:arg tides: Tidal predictions at the station.
:type tides: pandas DataFrame
:arg sdt: The beginning of the date range of interest.
:type sdt: datetime object
:arg edt: The end of the date range of interest.
:type edt: datetime object
:returns: residual (calculated residual), obs (observed water levels)"""
msl = SITES[name]["msl"]
obs = load_archived_observations(
name, sdt.strftime("%d-%b-%Y"), edt.strftime("%d-%b-%Y")
)
residual = compute_residual(obs.wlev - msl, obs.time, tides)
return residual, obs
def obs_residual_ssh_NOAA(name, tides, sdt, edt, product="hourly_height"):
"""Calculates the residual of the observed water levels with respect
to the predicted tides at a specific NOAA station and for a date range.
:arg name: Name of station.
:type name: string
:arg sdt: The beginning of the date range of interest.
:type sdt: datetime object
:arg edt: The end of the date range of interest.
:type edt: datetime object
:arg product: defines frequency of observed water levels
'hourly_height' for hourly or 'water_levels' for 6 min
:type product: string
:returns: residual (calculated residual), obs (observed water levels)
"""
sites = SITES
start_date = sdt.strftime("%d-%b-%Y")
end_date = edt.strftime("%d-%b-%Y")
obs = get_NOAA_wlevels(sites[name]["stn_no"], start_date, end_date, product=product)
# Prepare to find residual
residual = compute_residual(obs.wlev, obs.time, tides)
return residual, obs
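# Usage sketch (mirrors how the Neah Bay residual is computed further below):
#   tides = shared.get_tides('Neah Bay', path=paths['tides'])
#   sdt, edt = datetime.datetime(2015, 1, 1), datetime.datetime(2015, 1, 2)
#   residual, obs = obs_residual_ssh_NOAA('Neah Bay', tides, sdt, edt)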
def plot_wlev_residual_NOAA(t_orig, elements, figsize=(20, 6)):
"""Plots the water level residual as calculated by the function
calculate_wlev_residual_NOAA and has the option to also plot the
observed water levels and predicted tides over the course of one day.
:arg t_orig: The beginning of the date range of interest.
:type t_orig: datetime object
:arg elements: Elements included in figure.
'residual' for residual only and 'all' for residual,
observed water level, and predicted tides.
:type elements: string
:arg figsize: Figure size (width, height) in inches.
:type figsize: 2-tuple
:returns: fig
"""
tides = shared.get_tides("Neah Bay", path=paths["tides"])
residual, obs = obs_residual_ssh_NOAA("Neah Bay", tides, t_orig, t_orig)
# Figure
fig, ax = plt.subplots(1, 1, figsize=figsize)
# Plot
ax.plot(
obs.time,
residual,
colours["residual"],
label="Observed Residual",
linewidth=2.5,
)
if elements == "all":
ax.plot(
obs.time,
obs.wlev,
colours["observed"],
label="Observed Water Level",
lw=2.5,
)
ax.plot(
tides.time,
tides.pred[tides.time == obs.time],
colours["predicted"],
label="Tidal Predictions",
linewidth=2.5,
)
if elements == "residual":
pass
ax.set_title(
"Residual of the observed water levels at"
" Neah Bay: {t:%d-%b-%Y}".format(t=t_orig)
)
ax.set_ylim([-3.0, 3.0])
ax.set_xlabel("[hrs]")
hfmt = mdates.DateFormatter("%m/%d %H:%M")
ax.xaxis.set_major_formatter(hfmt)
ax.legend(loc=2, ncol=3)
ax.grid()
return fig
def NeahBay_forcing_anom(textfile, run_date, tide_file, archive=False, fromtar=False):
"""Calculate the Neah Bay forcing anomaly for the data stored in textfile.
:arg textfile: the textfile containing forecast/observations
:type textfile: string
:arg run_date: date of the simulation
:type run_date: datetime object
:arg tide_file: path and name for the tide file
:type tide_file: string
:returns: dates, surge, forecast_flag
The dates, surges and a flag specifying if each point was a forecast
"""
if fromtar:
data = pd.read_csv(textfile, parse_dates=[0], index_col=0)
import requests
import pandas as pd
import numpy as np
import time
class FMP_CONNECTION(object):
def __init__(self,api_key:str):
self._api_key = api_key
def set_apikey(self,new_apikey):
self._api_key = new_apikey
def get_apikey(self) -> str:
return self._api_key
def _merge_dfs(self, first_df:pd.DataFrame, second_df:pd.DataFrame, how:str = 'left'):
cols_to_use = second_df.columns.difference(first_df.columns)
new_df = pd.merge(first_df, second_df[cols_to_use], left_index=True, right_index=True, how=how)
return new_df
def _get_df(self,url:str,is_historical:bool = False) -> pd.DataFrame:
response = requests.get(url)
if response.status_code == 200:
if response.json() == {}:
print('Requested instrument is empty when retrieving data')
return None
if is_historical == False:
response_df = pd.DataFrame.from_dict(response.json())
return response_df
else:
symbol = response.json()['symbol']
df = pd.DataFrame.from_dict(response.json()['historical'])
df.insert(0,'symbol',symbol)
df['date'] = pd.to_datetime(df['date'],infer_datetime_format=True)
df.sort_values(by='date',ascending=True,inplace=True)
df.set_index('date',inplace=True)
df.index = pd.to_datetime(df.index, infer_datetime_format=True)
return df
else:
raise ConnectionError('Could not connect to FMP Api, this was the response: \n',response.json())
def historical_price_by_interval(self,ticker:str,interval:str='1d') -> pd.DataFrame:
"""
Retrieve historical price data from various time granularities
Parameters
----------
ticker:str :
The ticker of the financial instrument to retrieve historical price data.
interval: {1min,5min,15min,30min,1hour,4hour,1d,1w,1m,1q,1y} :
The granularity of how often the price historical data must be retrieved
(Default value = '1d')
Returns
-------
pd.DataFrame
"""
url = None
# Retrieve Historical info from 1 min to 4 hours
if interval in ['4hour','1hour','30min','15min','5min','1min']:
url = f'https://financialmodelingprep.com/api/v3/historical-chart/{interval}/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url)
historical_df.insert(0,'symbol',ticker)
if 'close' and 'date' in list(historical_df.columns):
historical_df.sort_values(by='date',ascending=True,inplace=True)
historical_df.set_index('date',inplace=True)
historical_df.index = pd.to_datetime(historical_df.index, infer_datetime_format=True)
historical_df['change'] = historical_df['close'].pct_change()
historical_df['realOpen'] = historical_df['close'].shift(1)
return historical_df
# Retrieve Daily Info
elif interval == '1d':
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url,True)
historical_df['change'] = historical_df['close'].pct_change()
historical_df['realOpen'] = historical_df['close'].shift(1)
return historical_df
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url,True)
historical_df['daily'] = pd.to_datetime(historical_df.index, infer_datetime_format=True)
# Retrieve Weekly, Monthly, Quarterly and Yearly Price Data
if interval == '1w':
historical_df['week'] = historical_df['daily'].dt.to_period('w').apply(lambda r: r.start_time)
df = historical_df.drop_duplicates(subset=['week'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1m':
historical_df['monthly'] = historical_df['daily'].astype('datetime64[M]')
df = historical_df.drop_duplicates(subset=['monthly'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1q':
historical_df['quarter'] = historical_df['daily'].dt.to_period('q')
df = historical_df.drop_duplicates(subset=['quarter'], keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1y':
historical_df['year'] = historical_df['daily'].dt.year
df = historical_df.drop_duplicates(subset=['year'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
else:
raise ValueError('unsupported interval for ',interval,'check your spelling')
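# Usage sketch (hypothetical API key and ticker):
#   conn = FMP_CONNECTION('YOUR_API_KEY')
#   daily = conn.historical_price_by_interval('AAPL', interval='1d')
#   hourly = conn.historical_price_by_interval('AAPL', interval='1hour')
# Both calls return a DataFrame indexed by date, with 'change' (pct change of close)
# and 'realOpen' (previous close) columns appended.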
def historical_closing_price(self,ticker:str,interval:str = '1d'):
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?serietype=line&apikey={self._api_key}'
df = self._get_df(url,True)
if df is None:
return None
df['date'] = pd.to_datetime(df.index, infer_datetime_format=True)  # the '1w'/'1m'/'1q'/'1y' branches below rely on this column
if interval == '1d':
return df
elif interval == '1w':
df['week'] = df['date'].dt.to_period('w').apply(lambda r: r.start_time)
df = df.drop_duplicates(subset=['week'], keep='first')
df = df.drop(columns=['week'])
elif interval == '1m':
df['monthly'] = df['date'].astype('datetime64[M]')
df = df.drop_duplicates(subset=['monthly'],keep='first')
df = df.drop(columns=['monthly'])
df['date'] = df['date'].astype('datetime64[M]')
elif interval == '1q':
df['quarter'] = df['date'].dt.to_period('q')
df = df.drop_duplicates(subset=['quarter'], keep='first')
df = df.drop(columns=['quarter'])
elif interval == '1y':
df['year'] = df['date'].dt.year
df = df.drop_duplicates(subset=['year'],keep='first')
df = df.drop(columns=['year'])
df = df.drop(columns=['date'])
return df
def get_closing_prices(self,tickers:[str], interval:str = '1d', from_date:str = None):
if isinstance(tickers,str):
df = self.historical_closing_price(tickers,interval)
closing_df = pd.pivot_table(data=df,index=df.index,columns='symbol',values='close',aggfunc='mean')
closing_df.index = pd.to_datetime(closing_df.index, infer_datetime_format=True)
from_d = from_date if from_date != None else closing_df.index.min()
return closing_df[from_d:]
else:
dfs = []
for ticker in tickers:
df = self.historical_closing_price(ticker,interval)
dfs.append(df)
x = pd.concat(dfs)
# import sys
# sys.path.insert(0, sys.path[0].rstrip('/pipeline/src'))
# %load_ext autoreload
# %autoreload 2
# import os
# os.environ.update(dict(
# WML_USERNAME='admin',
# WML_PASSWORD='password',
# CP4D_URL='https://zen-cpd-zen.apps.pwh.ocp.csplab.local'
# ))
import os
import time
import types
import sys
from io import StringIO
import requests
import pandas as pd
from ibm_watson_machine_learning import APIClient
from ibm_ai_openscale import APIClient4ICP
from ibm_ai_openscale.engines import WatsonMachineLearningAsset
from ibm_ai_openscale.supporting_classes.enums import InputDataType, ProblemType
from ibm_ai_openscale.supporting_classes import PayloadRecord
def keys_exist(_dict, *keys):
'''
Check if path of *keys exist in nested _dict.
based on https://stackoverflow.com/questions/43491287/elegant-way-to-check-if-a-nested-key-exists-in-a-dict
'''
for key in keys:
try:
_dict = _dict[key]
except (TypeError, KeyError):
return False
return _dict
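# Usage sketch for keys_exist (illustrative dict):
#   details = {'entity': {'status': {'online_url': {'url': 'https://example'}}}}
#   keys_exist(details, 'entity', 'status', 'online_url', 'url')  # -> 'https://example'
#   keys_exist(details, 'entity', 'missing')                      # -> False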
class Pipeline:
'''Object that represents a WML deployed ML model'''
def __init__(self, project_name=None, deployment_space_name=None,
model_name=None, software_spec=None, problem_type=None,
label_column=None, dataset_name=None, model_path=None,
model_type=None, **kwargs):
self.project_name = project_name
self.deployment_space_name = deployment_space_name
self.model_name = model_name
self.model_path = model_path
self.software_spec = software_spec
self.problem_type = getattr(ProblemType, problem_type) if problem_type else None
self.model_type = model_type
self.project_uid = None
self.deployment_space_uid = None
self.dataset = {}
self.dataset['name'] = dataset_name
self.dataset['label_column'] = label_column
self._problem_types = {attr:getattr(ProblemType, attr) \
for attr in vars(ProblemType) if not attr.startswith('_')}
def set_connection(self, username=None, password=None, url=None):
'''
Instantiate WML and WOS python clients.
Uses the same CP4D credentials for both WML and WOS, meaning both
services must be on the same CP4D cluster.
Passed values override ENV vars which override default values.
'''
_credentials = {"username": username, "password": password, "url": url}
# check for env vars if args not passed
env_keys = dict(zip(_credentials.keys(), ['CP4D_USERNAME', "CP4D_PASSWORD", "CP4D_URL"]))
_credentials = {k: v if v else os.environ.get(env_keys[k]) for k,v in _credentials.items()}
# get default values if args not passed and env vars not present
defaults = {"username": "admin", "password": "password",
"url": "https://zen-cpd-zen.apps.pwh.ocp.csplab.local"}
_credentials = {k:v if v else defaults[k] for k,v in _credentials.items()}
self._credentials = _credentials
self.wos_client = APIClient4ICP(self._credentials)
self._credentials['instance_id'] = 'wml_local'
self._credentials['version'] = '3.0.1'
self.wml_client = APIClient(self._credentials)
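# Wiring sketch (hypothetical values; when arguments are omitted the CP4D_USERNAME /
# CP4D_PASSWORD / CP4D_URL environment variables, then the hard-coded defaults, are used):
#   pipe = Pipeline(project_name='my-project', deployment_space_name='my-space')
#   pipe.set_connection()  # or pipe.set_connection(username='admin', password='...', url='https://...')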
def set_project(self, project_name=None):
'''
Set default project for wml python client + define client method
to extract asset details
'''
if project_name: self.project_name = project_name
assert self.project_name, 'project_name must be passed.'
# get list (len 1) of CP4D projects matching specified name
token = self.wml_client.wml_token
headers = {"content-type": "application/json", "Accept": "application/json",
"Authorization": "Bearer " + token}
project_uid_list = [x.get('metadata').get('guid') for x in requests.get(self._credentials.get('url') + '/v2/projects/', headers=headers, verify=False).json().get('resources') if x.get('entity').get('name')==self.project_name]
# set project
# ISSUE: setting default CP4D project seems to unset the default deployment space!
if len(project_uid_list) < 1:
raise ValueError((f'No project named {self.project_name} exists in'
' your CP4D Instance. Please provide the name of an existing project.'))
self.project_uid = project_uid_list[0]
self.wml_client.set.default_project(self.project_uid)
def get_asset_details(self, project_uid=None):
if project_uid:
self.set.default_project(project_uid)
if self.default_project_id is None:
raise ValueError(('There is no default project set. Set a '
'default project first or pass a project_uid to this function.'))
temp_stdout = StringIO()
true_stdout = sys.stdout
sys.stdout = temp_stdout
self.data_assets.list()
#sys.stdout = sys.__stdout__
sys.stdout = true_stdout
lines = temp_stdout.getvalue().split('\n')
keys = [x.split(' ') for x in lines][1]
keys = [x.lower() for x in keys if len(x) != 0]
end = len(lines) - 2
values = [[x for x in x.split(' ') if len(x) != 0] for x in lines if len(x) != 0]
new_list = []
for i in range(2, end):
new_list.append(dict(zip(keys, values[i])))
return new_list
self.wml_client.get_asset_details = types.MethodType(get_asset_details, self.wml_client)
# self = Pipeline()
# self.set_connection()
# path="/Users/no<EMAIL>/Desktop/projects/LowesDeploy/bitbucket_GIT_REPO/val_breast_cancer.csv"
def set_data(self, dataset_name=None, label_column=None, problem_type=None):
'''
Downloads data set stored in CP4D project data assets and loads into
memeory. The deployed model will be used to make predictions on the
downloaded dataset.
'''
if label_column: self.dataset['label_column'] = label_column
if dataset_name: self.dataset['name'] = dataset_name
if problem_type: self.problem_type = problem_type
uids = [i['asset_id'] for i in self.wml_client.get_asset_details() if i['name']==self.dataset['name']]
if len(uids)==0:
raise ValueError('Specified dataset %s is not available.' %(self.dataset['name']))
# select first data asset with specified name
path = self.wml_client.data_assets.download(uids[0], self.dataset['name'])
self.dataset['data'] = pd.read_csv(path)
os.remove(path)
self.dataset['FEATURE_COLUMNS'] = self.dataset['data'].columns.drop(self.dataset['label_column']).tolist()
# is_num = lambda dtype: np.issubdtype(dtype, np.number)
# CATEGORICAL_COLUMNS = [i for i in data_bunch.feature_names if not is_num(data_bunch.frame[i].dtype)]
# if len(CATEGORICAL_COLUMNS) == 0: CATEGORICAL_COLUMNS = None
print(self.dataset['data'].head())
def set_namespace(self, deployment_space_name=None):
'''
Establish deployment space with specified name.
'''
if deployment_space_name: self.deployment_space_name = deployment_space_name
# create new deployment space
default_space = self.wml_client.spaces.store(
{self.wml_client.spaces.ConfigurationMetaNames.NAME: self.deployment_space_name}
)
uid = default_space.get('metadata').get('guid')
# set new space as default space for future actions
# ISSUE: setting default deployment space seems to unset the default CP4D project!
self.wml_client.set.default_space(uid)
print("Deployment space created: " + self.deployment_space_name)
def store_model(self, model_path=None, model_name=None, model_type=None,
software_spec=None):
'''
Store a python ML model in the WML instance's repository
Params:
model_path: (str) model must be a .tar.gz file
'''
if model_name: self.model_name = model_name
if model_path: self.model_path = model_path
if model_type: self.model_type = model_type
if software_spec: self.software_spec = software_spec
assert self.model_name, 'model_name must be passed.'
assert self.model_path, 'model_path must be passed.'
assert self.model_type, 'model_type must be passed.'
assert self.software_spec, 'software_spec must be passed.'
sofware_spec_uid = self.wml_client.software_specifications.get_id_by_name(self.software_spec)
# wml seems to do some kind of path resolution that caused a problem at some point
self.model_path = os.path.abspath(self.model_path)
print('model path: ', self.model_path)
self.model_details = self.wml_client.repository.store_model(self.model_path,
meta_props={
self.wml_client.repository.ModelMetaNames.NAME: self.model_name,
self.wml_client.repository.ModelMetaNames.TYPE: self.model_type,
self.wml_client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
})
self.model_uid = self.model_details.get('metadata').get('guid')
print('Stored model:', self.model_details)
def deploy_model(self):
'''Deploy stored wml model'''
self.deployment = self.wml_client.deployments.create(artifact_uid = self.model_uid,
meta_props = {
self.wml_client.deployments.ConfigurationMetaNames.NAME: self.model_name,
self.wml_client.deployments.ConfigurationMetaNames.ONLINE: {}
})
self.deployment_uid = self.deployment.get('metadata').get('guid')
print("Deployment succesful! at " + self.deployment['entity']['status']['online_url']['url'])
def score_deployed_model(self):
#request_data = {self.wml_client.deployments.ScoringMetaNames.INPUT_DATA: [{"fields":self.dataset.data.columns.tolist(), "values":self.dataset.data.values.tolist()}]}
print('Scoring deployed model...')
request_payload = {'input_data':
[{'fields': self.dataset['FEATURE_COLUMNS'],
'values': self.dataset['data'][self.dataset['FEATURE_COLUMNS']].values.tolist()
}]
}
response_payload = self.wml_client.deployments.score(self.deployment_uid, request_payload)
if response_payload: print('Deployed model succesfully scored.')
return request_payload, response_payload
def set_subscription(self):
'''Create subscription to the stored model and log a request/response payload'''
# set binding to external WML instance cluster
# self.wos_client.data_mart.bindings.add('WML instance',
# WatsonMachineLearningInstance4ICP(wml_credentials = openscale_credentials)
# )
# create subscription to stored model
print('Creating subscription to WML model...')
self.subscription = self.wos_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
self.model_uid,
problem_type=self.problem_type,
input_data_type=InputDataType.STRUCTURED,
label_column=self.dataset['label_column'],
feature_columns=self.dataset['FEATURE_COLUMNS'],
#categorical_columns=self.dataset.CATEGORICAL_COLUMNS,
prediction_column='prediction',
probability_column='probability'
))
# log payload
request_payload, response_payload = self.score_deployed_model()
record = PayloadRecord(request=request_payload, response=response_payload)
#self.subscription.payload_logging.enable() # apparently not necessary
self.subscription.payload_logging.store(records=[record])
# give WOS time to ingest Payload data before attempting any monitoring.
wait = 60
print(f'Wait {wait} seconds for WOS database to update...')
time.sleep(wait)
print('Payload Table:')
self.subscription.payload_logging.show_table(limit=5)
def run_quality_monitor(self):
self.subscription.quality_monitoring.enable(threshold=.8, min_records=50)
wait = 60
print(f'Wait {wait} seconds for WOS database to update...')
time.sleep(wait)
# log feedback
ordered_features_and_target = [col['name'] for col in self.subscription.get_details()['entity']['asset_properties']['training_data_schema']['fields']]
feedback_data = self.dataset['data'][ordered_features_and_target]
self.subscription.feedback_logging.store(feedback_data.values.tolist(), data_header=True)
run_details = self.subscription.quality_monitoring.run(background_mode=False)
run_details = self.subscription.quality_monitoring.get_run_details(run_details['id'])
print('Model Qaulity Validation:')
print(pd.Series(run_details['output']['metrics']))
print(pd.DataFrame(run_details['output']['confusion_matrix']['metrics_per_label']))
import os
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import xlogy
from pandas import DataFrame, Series, concat, qcut, cut
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc
def download_dataset(name, folder='datasets'):
"""Function for downloading and unzipping example datasets
Args:
name (str): Dataset name. Available datasets are freMPL-R, US_Accidents and Lending_Club
folder (str): Path to the folder to dataset saving
Returns:
str: Information about saved dataset
"""
datasets = {
'freMPL-R': 'https://github.com/MindSetLib/Insolver/releases/download/v0.4.4/freMPL-R.zip',
'US_Accidents': 'https://github.com/MindSetLib/Insolver/releases/download/v0.4.4/US_Accidents_June20.zip',
'US_Accidents_small': 'https://github.com/MindSetLib/Insolver/releases/download/v0.4.5/US_Accidents_small.zip',
'Lending_Club': 'https://github.com/MindSetLib/Insolver/releases/download/v0.4.4/LendingClub.zip'
}
if name not in datasets.keys():
return f'Dataset {name} is not found. Available datasets are {", ".join(datasets.keys())}'
if not os.path.exists(folder):
os.makedirs(folder)
url = datasets[name]
with urlopen(url) as file:
with ZipFile(BytesIO(file.read())) as zfile:
zfile.extractall(folder)
return f'Dataset {name} saved to "{folder}" folder'
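# Usage sketch: fetch one of the bundled example datasets into ./datasets
#   print(download_dataset('freMPL-R'))          # -> 'Dataset freMPL-R saved to "datasets" folder'
#   print(download_dataset('no_such_dataset'))   # -> message listing the available dataset names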
def train_val_test_split(*arrays, val_size, test_size, random_state=0, shuffle=True, stratify=None):
"""Function for splitting dataset into train/validation/test partitions.
Args:
*arrays (array_like): Arrays to split into train/validation/test sets containing predictors.
val_size (float): The proportion of the dataset to include in validation partition.
test_size (float): The proportion of the dataset to include in test partition.
random_state (:obj:`int`, optional): Random state, passed to train_test_split() from scikit-learn. (default=0).
shuffle (:obj:`bool`, optional): Passed to train_test_split() from scikit-learn. (default=True).
stratify (:obj:`array_like`, optional): Passed to train_test_split() from scikit-learn. (default=None).
Returns:
tuple: (x_train, y_train, x_valid, y_valid, x_test, y_test).
A tuple of partitions of the initial dataset.
"""
n_arrays = len(arrays)
split1 = train_test_split(*arrays, random_state=random_state, shuffle=shuffle,
test_size=test_size, stratify=stratify)
if n_arrays > 1:
train, test = split1[0::2], split1[1::2]
if val_size != 0:
split2 = train_test_split(*train, random_state=random_state, shuffle=shuffle,
test_size=val_size / (1 - test_size), stratify=stratify)
train, valid = split2[0::2], split2[1::2]
return (*train, *valid, *test)
else:
return train, test
else:
train, test = split1[0], split1[1]
if val_size != 0:
split2 = train_test_split(train, random_state=random_state, shuffle=shuffle,
test_size=val_size / (1 - test_size), stratify=stratify)
train, valid = split2[0], split2[1]
return train, valid, test
else:
return train, test
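# Usage sketch (X, y are array-likes of equal length; note the return values come back
# grouped as train pair, validation pair, test pair):
#   x_train, y_train, x_valid, y_valid, x_test, y_test = train_val_test_split(
#       X, y, val_size=0.15, test_size=0.15)
# The validation fraction is rescaled internally to val_size / (1 - test_size), so the
# final split is roughly 70% / 15% / 15% of the original rows.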
def train_test_column_split(x, y, df_column):
"""Function for splitting dataset into train/test partitions w.r.t. a column (pd.Series).
Args:
x (pd.DataFrame): DataFrame containing predictors.
y (pd.DataFrame): DataFrame containing target variable.
df_column (pd.Series): Series for train/test split, assuming it is contained in x.
Returns:
tuple: (x_train, x_test, y_train, y_test).
A tuple of partitions of the initial dataset.
"""
x1, y1, col_name = x.copy(), y.copy(), df_column.name
y1[col_name] = df_column
return (x1[x1[col_name] == 'train'].drop(col_name, axis=1), x1[x1[col_name] == 'test'].drop(col_name, axis=1),
y1[y1[col_name] == 'train'].drop(col_name, axis=1), y1[y1[col_name] == 'test'].drop(col_name, axis=1))
def deviance_score(y, y_pred, weight=None, power=0, agg='sum'):
"""Function for Deviance evaluation.
Args:
y: Array with target variable.
y_pred: Array with predictions.
weight: Weights for weighted metric.
power: Tweedie power parameter (0: Normal/Gaussian, 1: Poisson, 2: Gamma).
agg: Function to calculate deviance ['sum', 'mean'] or callable are supported.
Returns:
float, value of the deviance.
"""
dict_func = {'sum': np.sum, 'mean': np.mean}
func = dict_func[agg] if agg in ['sum', 'mean'] else agg if callable(agg) else None
if func is None:
raise ValueError("agg must be 'sum', 'mean' or a callable")
weight = 1 if weight is None else weight
if str(power).lower() in ["normal", "gaussian", "0"]:
return func(weight * np.power(y - y_pred, 2))
elif str(power).lower() in ["poisson", "1"]:
return func(2 * weight * (xlogy(y, y / y_pred) - (y - y_pred)))
elif str(power).lower() in ["gamma", "2"]:
return func(2 * weight * (np.log(y_pred / y) + y / y_pred - 1))
elif isinstance(power, str) or (0 < power < 1):
raise Exception(f"power={power} is not supported.")
else:
return func(2 * weight * (np.power(np.maximum(y, 0), 2 - power) / ((1 - power) * (2 - power)) -
(y * np.power(y_pred, 1 - power)) / (1 - power) +
(np.power(y_pred, 2 - power)) / (2 - power)))
def deviance_poisson(y, y_pred, weight=None, agg='sum'):
"""Function for Poisson Deviance evaluation.
Args:
y: Array with target variable.
y_pred: Array with predictions.
weight: Weights for weighted metric.
agg: Function to calculate deviance ['sum', 'mean'] or callable are supported.
Returns:
float, value of the Poisson deviance.
"""
return deviance_score(y, y_pred, weight=weight, power=1, agg=agg)
def deviance_gamma(y, y_pred, weight=None, agg='sum'):
"""Function for Gamma Deviance evaluation.
Args:
y: Array with target variable.
y_pred: Array with predictions.
weight: Weights for weighted metric.
agg: Function to calculate deviance ['sum', 'mean'] or callable are supported.
Returns:
float, value of the Gamma deviance.
"""
return deviance_score(y, y_pred, weight=weight, power=2, agg=agg)
def deviance_explained(y, y_pred, weight=None, power=0):
"""Function for Pseudo R^2 (Deviance explained) evaluation.
Args:
y: Array with target variable.
y_pred: Array with predictions.
weight: Weights for weighted metric.
power: Power for deviance calculation.
Returns:
float, value of the Pseudo R^2.
"""
dev = deviance_score(y, y_pred, weight=weight, power=power)
dev0 = deviance_score(y, np.repeat(np.mean(y), len(y)), weight=weight, power=power)
return 1 - dev/dev0
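# Worked sketch of the deviance helpers (toy numbers):
#   y_true = np.array([1., 2., 3.])
#   y_hat = np.array([1.5, 2.0, 2.5])
#   deviance_poisson(y_true, y_hat)            # Poisson deviance, summed over observations
#   deviance_gamma(y_true, y_hat, agg='mean')  # Gamma deviance, averaged instead of summed
#   deviance_explained(y_true, y_hat, power=1) # pseudo R^2 vs. the mean-only model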
def inforamtion_value_woe(data, target, bins=10, cat_thresh=10, detail=False):
"""Function for Information value and Weight of Evidence computation.
Args:
data (pd.DataFrame): DataFrame with data to compute IV and WoE.
target (:obj:`str` or :obj:`pd.Series`): Target variable to compute IV and WoE.
bins (:obj:`int`, optional): Number of bins for WoE calculation for continuous variables.
cat_thresh (:obj:`int`, optional): Maximum number of categories for non-binned WoE calculation.
detail (:obj:`bool`, optional): Whether to return detailed results DataFrame or not. Short by default.
Returns:
pd.DataFrame, DataFrame containing the data on Information Value (depends on detail argument).
"""
detailed_result, short_result = DataFrame(), DataFrame()
target = target.name if isinstance(target, Series) else target
cols = data.columns
for ivars in cols[~cols.isin([target])]:
if (data[ivars].dtype.kind in 'bifc') and (len(np.unique(data[ivars])) > cat_thresh):
binned_x = qcut(data[ivars], bins, duplicates='drop')
d0 = DataFrame({'x': binned_x, 'y': data[target]})
else:
d0 = DataFrame({'x': data[ivars], 'y': data[target]})
d = d0.groupby("x", as_index=False).agg({"y": ["count", "sum"]})
d.columns = ['Cutoff', 'N', 'Events']
d['% of Events'] = np.maximum(d['Events'], 0.5) / d['Events'].sum()
d['Non-Events'] = d['N'] - d['Events']
d['% of Non-Events'] = np.maximum(d['Non-Events'], 0.5) / d['Non-Events'].sum()
d['WoE'] = np.log(d['% of Events'] / d['% of Non-Events'])
d['IV'] = d['WoE'] * (d['% of Events'] - d['% of Non-Events'])
d.insert(loc=0, column='Variable', value=ivars)
temp = DataFrame({"Variable": [ivars], "IV": [d['IV'].sum()]}, columns=["Variable", "IV"])
detailed_result = concat([detailed_result, temp], axis=0)
short_result = concat([short_result, d], axis=0)
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import scipy.signal as ss
import csv
import sklearn
from quilt.data.ResidentMario import missingno_data
import missingno as msno
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler # for preprocessing the data
from sklearn.ensemble import RandomForestClassifier # Random forest classifier
from sklearn.tree import DecisionTreeClassifier # for Decision Tree classifier
from sklearn.svm import SVC # for SVM classification
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # to split the data
from sklearn.model_selection import KFold # For cross vbalidation
from sklearn.model_selection import GridSearchCV # for tunnig hyper parameter it will use all combination of given parameters
from sklearn.model_selection import RandomizedSearchCV # same for tunning hyper parameter but will use random combinations of parameters
from sklearn.metrics import confusion_matrix,recall_score,precision_recall_curve,auc,roc_curve,roc_auc_score,classification_report
# STEP2------------------# Importing the DATASET ------------
#------------------------------------------------------------
# Loading data from the iMotions the path to csv file directory
os.chdir("\\ML4TakeOver\\Data\\RawData")
directory = os.getcwd()
#dataFrame_takeover_feature = pd.read_csv('takeover_cleaned_feature4ML.csv', index_col=[0])
dataFrame_takeover_feature = pd.read_csv('takeover4ML.csv', index_col=[0])
dataset = dataFrame_takeover_feature
chunk_users = ['015_M3', '015_m2', '015_M1', '014_M3',  # select a handful of participants to save resources
'014_M2', '014_m1']
chunk_dataset = dataset[dataset['Name'].isin(chunk_users)]
dataset = chunk_dataset
dataset.shape
###### ======================================Encoding notes=======================================
# Alarm Type: TA =2, NoA =1, FA = 0 , Z = 3
# TakeOver : TK =1 , NTK= 0
# Alarm : 339.0 =339.0, 103.0= 4, 332.0=14, 259.0=11, 16.0=2, 178.0=6, 284.0=12,
# 213.0=9, 323.0=13, 185.0=7, 84.0=3, 137.0=5, 5.0=1, 191.0=8, 254.0=10
# Mode : +1 (Auto)= +1, -1(Manual)= 0
##### ===========================================================================================
dt_tmp = dataset
dt_tmp['Takeover'] = dt_tmp.Takeover.astype('category')
# Number of "NOT-TAKEOVER" per alarm type
dataset[dataset.Takeover == 'NTK']['Coming_AlarmType'].value_counts()
# Number of "TAKEOVER" per alarm type
dataset[dataset.Takeover == 'TK']['Coming_AlarmType'].value_counts()
## STEP3========================= Exploring the data, mainly the Label (Takeover) ====================
## ===================================================================================================
# let's check the "Takeover" distributions
sns.countplot("Takeover",data=dataset)
# Let's check the Percentage for "TakeOver"
Count_NoTakeOver = len(dataset[dataset["Takeover"]== 0 ]) # Non-TakeOver are repersented by 0
Count_TakeOver = len(dataset[dataset["Takeover"]== 1 ]) # TakeOver by 1
Percentage_of_NoTakeOver = Count_NoTakeOver/(Count_NoTakeOver+Count_TakeOver)
print("percentage of None-TakeOver, 0 = ",Percentage_of_NoTakeOver*100)
Percentage_of_TakeOver= Count_TakeOver/(Count_NoTakeOver+Count_TakeOver)
print("percentage of TakeOver, 1 = ",Percentage_of_TakeOver*100)
# the amount related to valid "TakeOver" and "None-Takeover"
Amount_TakeOver = dataset[dataset["Takeover"]== 1]
Amount_NoTakeOver = dataset[dataset["Takeover"]== 0]
plt.figure(figsize=(10,6))
plt.subplot(121)
Amount_TakeOver.plot.hist(title="TakeOver", legend =None)
plt.subplot(122)
Amount_NoTakeOver.plot.hist(title="No-Takeover",legend =None)
# Pandas offers three correlation coefficients out of the box: 1) Pearson's 2) Spearman rank 3) Kendall Tau
pearson = dataset.corr(method='pearson')
# assume target attr is the "Takeover or -3", then remove corr with itself
corr_with_target = pearson.iloc[-3][:]
# attributes sorted from the most predictive
predictivity = corr_with_target.sort_values(ascending=False)
## STEP4=========================-# Prepration for Machine Learning algorithms=========================
## ====================================================================================================
# Drop useless features for ML
dataset = dataset.drop(['Timestamp','index','ID', 'Name', 'EventSource', 'ManualGear','EventW','EventN','GazeDirectionLeftY','Alarm',
'GazeDirectionLeftX', 'GazeDirectionRightX', 'GazeDirectionRightY','CurrentBrake',
'PassBy','RangeN'], axis=1) #ManualGear has only "one" value
#EventW is pretty similar to EventN
dataset.shape
#---------------------------------------------------------
# convert categorical value to the number
# convert datatype of object to int and strings
dataset['LeftLaneType'] = dataset.LeftLaneType.astype(object)
dataset['RightLaneType'] = dataset.RightLaneType.astype(object)
dataset['TOT_Class'] = dataset.TOT_Class.astype(object)
dataset['Coming_Alarm'] = dataset.Coming_Alarm.astype(object)
dataset['Takeover'] = dataset.Takeover.astype(object)
dataset['Coming_AlarmType'] = dataset.Coming_AlarmType.astype(object)
dataset['NDTask'] = dataset.NDTask.astype(object)
#****** Drop features that happen after the Alarm (anything occurring after the alarm would interfere with takeover prediction)****************
dataset = dataset.drop(['Mode','TOT_Class', 'AlarmDuration','Coming_Alarm','ReactionTime','Coming_AlarmType'], axis=1) # Coming Alarm maybe helpful for ReactionTime
# ------------------------------------------------------.
# takeover (NT, TK) is our target
input_data = dataset.iloc[:, dataset.columns != 'Takeover']
X = input_data
y = dataset[['Takeover']].values.ravel()
# ======================================= Encoding Categorical variables =========================
# # Encoding categorical variables
from sklearn.preprocessing import StandardScaler,LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer, make_column_transformer #labelencoder class takes cat. var. and assign value to them
# List of all Categorical features
Cat_Features= ['LeftLaneType','RightLaneType','NDTask']
# Get the column index of the categorical features
categorical_features = []
for i in Cat_Features:
position = dataset.columns.get_loc(i)
categorical_features.append(position)
print(categorical_features)
# Get the column index of the Contin. features
conti_features = []
Cont_Filter = dataset.dtypes!=object
Cont_Filter = dataset.columns.where(Cont_Filter).tolist()
Cont_Filter_Cleaned = [name for name in Cont_Filter if str(name) !='nan']
for i in Cont_Filter_Cleaned:
position = dataset.columns.get_loc(i)
conti_features.append(position)
print(conti_features)
# How many columns will be needed for each categorical feature?
print(dataset[Cat_Features].nunique(),
'There are',"--",sum(dataset[Cat_Features].nunique().loc[:]),"--",'groups in the whole dataset')
# ===============================Create pipeline for data transformatin (normalize numeric, and hot encoder categorical)
# =============================================================================
from sklearn.pipeline import make_pipeline
numeric = make_pipeline(
StandardScaler())
categorical = make_pipeline(
# handles categorical features
# sparse = False output an array not sparse matrix
OneHotEncoder(sparse=False)) # Automatically take care of Dummy Trap
# creates a simple preprocessing pipeline (that will be combined in a full prediction pipeline below)
# to scale the numerical features and one-hot encode the categorical features.
preprocess = make_column_transformer((numeric, Cont_Filter_Cleaned),
(categorical, ['LeftLaneType','RightLaneType','Coming_AlarmType','NDTask']),
remainder='passthrough')
# =============================================================================
# Taking care of splitting
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.20, random_state = 42)
# apply preprocess step (normalize the numeric value and one hot encoding for the categorical)
preprocess.fit_transform(X_train)
# =============================================================================
#SVM is usually optimized using two parameters gamma,C .
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] # C: the Cost parameter, Gamma: Control Bias and variance
# A High value of Gamma leads to more accuracy but biased results and vice-versa.
# Similarly, a large value of Cost parameter (C) indicates poor accuracy but low bias and vice-versa.
tuned_parameters2 = [{'kernel': ['linear'], 'C': [1, 100]}]
model = make_pipeline(
preprocess,
SVC())
##### Try Simple Version ##############
from sklearn import svm
clf = svm.SVC()
X_train = preprocess.fit_transform(X_train)
grid_result = clf.fit(X_train, y_train)
X_test = preprocess.fit_transform(X_test)
clf.predict(X_test)
## we should try this in near future: https://machinelearningmastery.com/multi-class-classification-tutorial-keras-deep-learning-library/
##############
############################
##########################################
########################################################
######################################################################
# the GridSearchCV object with pipeline and the parameter space with 5 folds cross validation.
scores = ['precision', 'recall']
best_params = []
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
SVC(), tuned_parameters2, scoring='%s_macro' % score
)
X_train = preprocess.fit_transform(X_train)
grid_result = clf.fit(X_train, y_train)
best_params.append(grid_result.best_params_)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
X_test = preprocess.fit_transform(X_test)
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# =============================================================================
# ================= Resampling the imbalanced Label of "TakeOver" ========================================
#==========================================================================================================
# We create the preprocessing pipelines for both numeric and categorical data.
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
numeric_features = Cont_Filter_Cleaned
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['LeftLaneType','RightLaneType','Coming_AlarmType','NDTask']
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Separate input features and target
y = dataset.Takeover
X = dataset.drop('Takeover', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# upsample minority
not_takeover_upsampled = resample(not_takeover,
replace=True, # sample with replacement
n_samples=len(take_over), # match number in majority class
random_state=27) # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([take_over, not_takeover_upsampled])
# check new class counts
upsampled.Takeover.value_counts() #713585
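# Sanity-check sketch: after upsampling, both classes should have identical counts,
# so value_counts() should contain a single distinct value.
print('Class balance after upsampling:\n', upsampled.Takeover.value_counts())
assert upsampled.Takeover.value_counts().nunique() == 1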
# trying logistic regression again with the balanced dataset
y_train = upsampled.Takeover
X_train = upsampled.drop('Takeover', axis=1)
##### LOGISTIC REGRESSION ###############################
#########################################################
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
y_score = clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test)) # model score: 0.846
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
##### DECISION TREE ##################################
#########################################################
from sklearn.tree import DecisionTreeClassifier
clf_3 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('clf', DecisionTreeClassifier(random_state=0))])
y_score = clf_3.fit(X_train, y_train)
print("model score: %.3f" % clf_3.score(X_test, y_test)) # model score: 0.99
y_true_3, y_pred_3 = y_test, clf_3.predict(X_test)
print(classification_report(y_true_3, y_pred_3))
##### RANDOM FOREST ##################################
#########################################################
clf_2 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('clf',RandomForestClassifier(max_depth=2, random_state=0))])
y_score = clf_2.fit(X_train, y_train)
print("model score: %.3f" % clf_2.score(X_test, y_test)) # model score: 0.830
y_true_2, y_pred_2 = y_test, clf_2.predict(X_test)
print(classification_report(y_true_2, y_pred_2))
##### Regularized Greedy Forest (RGF) ##################################
############################################################################
from sklearn.utils.validation import check_random_state
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
from rgf.sklearn import RGFClassifier
y_upsampled = upsampled.Takeover
X_upsampled = upsampled.drop('Takeover', axis=1)
clf_5 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', RGFClassifier(max_leaf=400,
algorithm="RGF_Sib",
test_interval=100,
verbose=True))])
n_folds = 5
rgf_scores = cross_val_score(clf_5,
X_upsampled,
y_upsampled,
cv=StratifiedKFold(n_folds))
rgf_score = sum(rgf_scores)/n_folds
print('RGF Classifier score: {0:.5f}'.format(rgf_score)) #RGF Classifier score: 0.92304
# XGBClassifier(class_weight='balanced')  # stray leftover: XGBClassifier is never imported or used in this script
##### Gradient Boosting #############################################
############################################################################
from sklearn.ensemble import GradientBoostingClassifier
clf_gb = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', GradientBoostingClassifier(n_estimators=20,
learning_rate=0.01,
subsample=0.6,
random_state=127))])
gb_scores = cross_val_score(clf_gb,
X_upsampled,
y_upsampled,
scoring="f1_weighted",
cv=StratifiedKFold(n_folds))
gb_score = sum(gb_scores)/n_folds
print('Gradient Boosting Classifier score: {0:.5f}'.format(gb_score)) #score: 0.79832
print('>> Mean CV score is: ', round(np.mean(gb_scores),3))
pltt = sns.distplot(pd.Series(gb_scores, name='CV scores distribution (Gradient Boosting)'), color='r')
##### ADA Boost #########################################################
###########################################################################
from sklearn.ensemble import AdaBoostClassifier
clf_4 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', AdaBoostClassifier(n_estimators=100, random_state=0))])
y_score = clf_4.fit(X_train, y_train)
print("model score: %.3f" % clf_4.score(X_test, y_test)) # model score: 0.887
y_true_4, y_pred_4 = y_test, clf_4.predict(X_test)
print(classification_report(y_true_4, y_pred_4))
##### GAUSSIAN PROCESS #################################
#########################################################
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
kernel = 1.0 * RBF(1.0)
clf_3 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('clf',GaussianProcessClassifier(kernel=kernel, random_state=0))]) # model score: 0.830
y_score = clf_3.fit(X_train, y_train)
print("model score: %.3f" % clf_3.score(X_test, y_test)) # model score: 0.830
y_true_3, y_pred_3 = y_test, clf_3.predict(X_test)
print(classification_report(y_true_3, y_pred_3))
# # =============================================================================
# ================= DownSampling the majority imbalanced Label of "TakeOver" ======================================
#==================================================================================================================
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# downsample majority
takeover_downsampled = resample(take_over,
replace = False, # sample without replacement
n_samples = len(not_takeover), # match minority n
random_state = 27) # reproducible results
# combine minority and downsampled majority
downsampled = pd.concat([takeover_downsampled, not_takeover])
# checking counts
downsampled.Takeover.value_counts()
# trying logistic regression again with the balanced dataset
y_train_down = downsampled.Takeover
X_train_down = downsampled.drop('Takeover', axis=1)
##### LOGISTIC REGRESSION ###############################
#########################################################
# Now we have a full prediction pipeline.
clf_down = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
y_score_down = clf_down.fit(X_train_down, y_train_down)
print("model score: %.3f" % clf_down.score(X_test, y_test)) # model score: 0.846
y_true, y_pred = y_test, clf_down.predict(X_test)
print(classification_report(y_true, y_pred))
##### ADA Boost ##################################
#########################################################
from sklearn.ensemble import AdaBoostClassifier
clf_4_down = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', AdaBoostClassifier(n_estimators=100, random_state=0))])
y_score = clf_4_down.fit(X_train_down, y_train_down)
print("model score: %.3f" % clf_4_down.score(X_test, y_test)) # model score: 0.887
y_true_down_4, y_pred_down_4 = y_test, clf_4_down.predict(X_test)
print(classification_report(y_true_down_4, y_pred_down_4))
# # =============================================================================
# example of one hot encoding for a neural network
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import h5py
import pytest
# Check the GPU availability
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
# Assigning values to X, Y
y = dataset.Takeover
X = dataset.drop('Takeover', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
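# Note on the fixtures above: `price` is a 5-day linearly increasing price series,
# `price_wide` tiles it into three columns ('a', 'b', 'c'), and `big_price` /
# `big_price_wide` provide a larger random frame (1000 rows x 1000 columns) for the
# heavier tests further below.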
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
    # Calculations: verify the filled size, price and fees, plus the resulting cash, position,
    # debt and free-cash state, for buys and sells under fees, fixed fees, slippage and the
    # various size types
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
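# build_call_seq_nb (Numba-compiled) should produce exactly the same call-sequence arrays as
# its non-compiled counterpart build_call_seq for default, reversed, and seeded random ordering.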
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
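# Shared sizing fixtures: `order_size` alternates between going all-in long and all-in short
# (with a NaN bar meaning "no order"), `order_size_wide` tiles it across three columns,
# and `order_size_one` trades a single unit per bar.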
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
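# Thin wrappers that fix the trade direction, so each test below only varies the
# Portfolio.from_orders parameter under scrutiny and compares the resulting order records
# against hand-verified values.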
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
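    # A size of +np.inf orders as much as the available cash allows, while -np.inf opens the
    # largest permitted short position, as the single-order records below confirm.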
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
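    # lock_cash=True caps order sizes so that (shared) free cash is never drawn below zero,
    # e.g. by re-using short-sale proceeds; compare the free-cash arrays of the locked and
    # unlocked variants below.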
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
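    # With log=True every order request produces a log record, including requests that end up
    # ignored (the NaN-size bar) or rejected, not just the filled ones.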
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
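    # With cash_sharing=True the two grouped columns draw from a single cash balance, so only
    # the first column in the call sequence gets filled, while the separately grouped third
    # column trades on its own cash.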
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
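    # call_seq='auto' reorders calls within each group so that orders reducing positions are
    # executed before buys in the same bar, letting the freed-up cash fund the purchases and
    # making the realized asset values match the requested targets exactly.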
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
|
pd.Int64Index([0], dtype='int64')
|
pandas.Int64Index
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet as fp
except ImportError: # pragma: no cover
fp = None
from .... import dataframe as md
from .... import tensor as mt
from ...datasource.read_csv import DataFrameReadCSV
from ...datasource.read_sql import DataFrameReadSQL
from ...datasource.read_parquet import DataFrameReadParquet
@pytest.mark.parametrize('chunk_size', [2, (2, 3)])
def test_set_index(setup, chunk_size):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=chunk_size)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, df3.execute().fetch())
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, df4.execute().fetch())
expected = df1.set_index('y')
df2.set_index('y', inplace=True)
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
def test_iloc_getitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
result = df3.execute(extra_config={'check_series_name': False}).fetch()
pd.testing.assert_series_equal(
expected, result)
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, df4.execute().fetch())
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, df5.execute().fetch())
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df6.execute().fetch())
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df7.execute().fetch())
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df8.execute().fetch())
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df9.execute().fetch())
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, df10.execute().fetch())
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
assert expected == df11.execute().fetch()
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, df12.execute().fetch())
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, df14.execute().fetch())
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, df13.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
assert series.execute().fetch() == data.iloc[4]
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# test index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)[:3]
pd.testing.assert_index_equal(
index.execute().fetch(), data[:3])
index = md.Index(data, chunk_size=3)[4]
assert index.execute().fetch() == data[4]
index = md.Index(data, chunk_size=3)[[2, 3, 4, 9]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[2, 3, 4, 9]])
index = md.Index(data, chunk_size=3)[[4, 3, 9, 2]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[4, 3, 9, 2]])
index = md.Index(data)[5:]
pd.testing.assert_index_equal(
index.execute().fetch(), data[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
index = md.Index(data)[selection]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
index = md.Index(data)[mt.tensor(selection, chunk_size=4)]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
def test_iloc_setitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
series.execute().fetch(), data)
# test Index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)
with pytest.raises(TypeError):
index[5:] = 4
def test_loc_getitem(setup):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
raw6 = raw1[:0]
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df6 = md.DataFrame(raw6)
df = df2.loc[3, 'b']
result = df.execute().fetch()
expected = raw2.loc[3, 'b']
assert result == expected
df = df1.loc['a3', 'b']
result = df.execute(extra_config={'check_shape': False}).fetch()
expected = raw1.loc['a3', 'b']
assert result == expected
# test empty list
df = df1.loc[[]]
result = df.execute().fetch()
expected = raw1.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[[]]
result = df.execute().fetch()
expected = raw2.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[1:4, 'b':'d']
result = df.execute().fetch()
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = df.execute().fetch()
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = df.execute().fetch()
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = df.execute().fetch()
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = df.execute().fetch()
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = df.execute().fetch()
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = df.execute().fetch()
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
    # label-based fancy index in which non-unique labels exist
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
df = df3.loc[md.Series(selection), ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index on index
# whose index_value does not have value
df = df1.loc[['a3', 'a1'], ['b', 'a', 'd']]
result = df.execute(extra_config={'check_nsplits': False}).fetch()
expected = raw1.loc[['a3', 'a1'], ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# get timestamp by str
df = df5.loc['20200101']
result = df.execute(extra_config={'check_series_name': False}).fetch(
extra_config={'check_series_name': False})
expected = raw5.loc['20200101']
pd.testing.assert_series_equal(result, expected)
# get timestamp by str, return scalar
df = df5.loc['2020-1-1', 'c']
result = df.execute().fetch()
expected = raw5.loc['2020-1-1', 'c']
assert result == expected
# test empty df
df = df6.loc[[]]
result = df.execute().fetch()
expected = raw6.loc[[]]
pd.testing.assert_frame_equal(result, expected)
def test_dataframe_getitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
data2 = data.copy()
data2.index = pd.date_range('2020-1-1', periods=10)
mdf = md.DataFrame(data2, chunk_size=3)
series1 = df['c2']
pd.testing.assert_series_equal(
series1.execute().fetch(), data['c2'])
series2 = df['c5']
pd.testing.assert_series_equal(
series2.execute().fetch(), data['c5'])
df1 = df[['c1', 'c2', 'c3']]
pd.testing.assert_frame_equal(
df1.execute().fetch(), data[['c1', 'c2', 'c3']])
df2 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df2.execute().fetch(), data[['c3', 'c2', 'c1']])
df3 = df[['c1']]
pd.testing.assert_frame_equal(
df3.execute().fetch(), data[['c1']])
df4 = df[['c3', 'c1', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df4.execute().fetch(), data[['c3', 'c1', 'c2', 'c1']])
df5 = df[np.array(['c1', 'c2', 'c3'])]
pd.testing.assert_frame_equal(
df5.execute().fetch(), data[['c1', 'c2', 'c3']])
df6 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df6.execute().fetch(), data[['c3', 'c2', 'c1']])
df7 = df[1:7:2]
pd.testing.assert_frame_equal(
df7.execute().fetch(), data[1:7:2])
series3 = df['c1'][0]
assert series3.execute().fetch() == data['c1'][0]
df8 = mdf[3:7]
pd.testing.assert_frame_equal(
df8.execute().fetch(), data2[3:7])
df9 = mdf['2020-1-2': '2020-1-5']
pd.testing.assert_frame_equal(
df9.execute().fetch(), data2['2020-1-2': '2020-1-5'])
def test_dataframe_getitem_bool(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data = data.c1 > 0.5
mask = md.Series(mask_data, chunk_size=2)
# getitem by mars series
assert df[mask].execute().fetch().shape == data[mask_data].shape
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by pandas series
pd.testing.assert_frame_equal(
df[mask_data].execute().fetch(), data[mask_data])
# getitem by mars series with alignment but no shuffle
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=range(9, -1, -1))
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by mars series with shuffle alignment
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by mars series with shuffle alignment and extra element
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True, False],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4, 10])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by DataFrame with all bool columns
r = df[df > 0.5]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data > 0.5])
# getitem by tensor mask
r = df[(df['c1'] > 0.5).to_tensor()]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data['c1'] > 0.5])
def test_dataframe_getitem_using_attr(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'key', 'dtypes', 'size'])
df = md.DataFrame(data, chunk_size=2)
series1 = df.c2
pd.testing.assert_series_equal(
series1.execute().fetch(), data.c2)
# accessing column using attribute shouldn't overwrite existing attributes
assert df.key == getattr(getattr(df, '_data'), '_key')
assert df.size == data.size
pd.testing.assert_series_equal(df.dtypes, data.dtypes)
# accessing non-existing attributes should trigger exception
with pytest.raises(AttributeError):
_ = df.zzz # noqa: F841
def test_series_getitem(setup):
data = pd.Series(np.random.rand(10))
series = md.Series(data)
assert series[1].execute().fetch() == data[1]
data = pd.Series(np.random.rand(10), name='a')
series = md.Series(data, chunk_size=4)
for i in range(10):
series1 = series[i]
assert series1.execute().fetch() == data[i]
series2 = series[[0, 1, 2, 3, 4]]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[[0, 1, 2, 3, 4]])
series3 = series[[4, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[[4, 3, 2, 1, 0]])
series4 = series[[1, 2, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[[1, 2, 3, 2, 1, 0]])
#
index = ['i' + str(i) for i in range(20)]
data = pd.Series(np.random.rand(20), index=index, name='a')
series = md.Series(data, chunk_size=3)
for idx in index:
series1 = series[idx]
assert series1.execute().fetch() == data[idx]
selected = ['i1', 'i2', 'i3', 'i4', 'i5']
series2 = series[selected]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[selected])
selected = ['i4', 'i7', 'i0', 'i1', 'i5']
series3 = series[selected]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[selected])
selected = ['i0', 'i1', 'i5', 'i4', 'i0', 'i1']
series4 = series[selected]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[selected])
selected = ['i0']
series5 = series[selected]
pd.testing.assert_series_equal(
series5.execute().fetch(), data[selected])
data = pd.Series(np.random.rand(10,))
series = md.Series(data, chunk_size=3)
selected = series[:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:2])
selected = series[2:8:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[2:8:2])
data = pd.Series(np.random.rand(9), index=['c' + str(i) for i in range(9)])
series = md.Series(data, chunk_size=3)
selected = series[:'c2']
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:'c2'])
selected = series['c2':'c9']
pd.testing.assert_series_equal(
selected.execute().fetch(), data['c2':'c9'])
def test_head(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.head().execute().fetch(), data.head())
pd.testing.assert_frame_equal(
df.head(3).execute().fetch(), data.head(3))
pd.testing.assert_frame_equal(
df.head(-3).execute().fetch(), data.head(-3))
pd.testing.assert_frame_equal(
df.head(8).execute().fetch(), data.head(8))
pd.testing.assert_frame_equal(
df.head(-8).execute().fetch(), data.head(-8))
pd.testing.assert_frame_equal(
df.head(13).execute().fetch(), data.head(13))
pd.testing.assert_frame_equal(
df.head(-13).execute().fetch(), data.head(-13))
def test_tail(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.tail().execute().fetch(), data.tail())
pd.testing.assert_frame_equal(
df.tail(3).execute().fetch(), data.tail(3))
pd.testing.assert_frame_equal(
df.tail(-3).execute().fetch(), data.tail(-3))
pd.testing.assert_frame_equal(
df.tail(8).execute().fetch(), data.tail(8))
pd.testing.assert_frame_equal(
df.tail(-8).execute().fetch(), data.tail(-8))
pd.testing.assert_frame_equal(
df.tail(13).execute().fetch(), data.tail(13))
pd.testing.assert_frame_equal(
df.tail(-13).execute().fetch(), data.tail(-13))
def test_at(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
data2 = data.copy()
data2.index = np.arange(10)
df2 = md.DataFrame(data2, chunk_size=3)
with pytest.raises(ValueError):
_ = df.at[['i3, i4'], 'c1']
result = df.at['i3', 'c1'].execute().fetch()
assert result == data.at['i3', 'c1']
result = df['c1'].at['i2'].execute().fetch()
assert result == data['c1'].at['i2']
result = df2.at[3, 'c2'].execute().fetch()
assert result == data2.at[3, 'c2']
result = df2.loc[3].at['c2'].execute().fetch()
assert result == data2.loc[3].at['c2']
def test_iat(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
with pytest.raises(ValueError):
_ = df.iat[[1, 2], 3]
result = df.iat[3, 4].execute().fetch()
assert result == data.iat[3, 4]
result = df.iloc[:, 2].iat[3].execute().fetch()
assert result == data.iloc[:, 2].iat[3]
def test_setitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
data2 = np.random.rand(10)
data3 = np.random.rand(10, 2)
df = md.DataFrame(data, chunk_size=3)
df['c3'] = df['c3'] + 1
df['c10'] = 10
df[4] = mt.tensor(data2, chunk_size=4)
df['d1'] = df['c4'].mean()
df['e1'] = data2 * 2
result = df.execute().fetch()
expected = data.copy()
expected['c3'] = expected['c3'] + 1
expected['c10'] = 10
expected[4] = data2
expected['d1'] = data['c4'].mean()
expected['e1'] = data2 * 2
pd.testing.assert_frame_equal(result, expected)
# test set multiple cols with scalar
df = md.DataFrame(data, chunk_size=3)
df[['c0', 'c2']] = 1
df[['c1', 'c10']] = df['c4'].mean()
df[['c11', 'c12']] = mt.tensor(data3, chunk_size=4)
result = df.execute().fetch()
expected = data.copy()
expected[['c0', 'c2']] = 1
expected[['c1', 'c10']] = expected['c4'].mean()
expected[['c11', 'c12']] = data3
pd.testing.assert_frame_equal(result, expected)
# test set multiple rows
df = md.DataFrame(data, chunk_size=3)
df[['c1', 'c4', 'c10']] = df[['c2', 'c3', 'c4']] * 2
result = df.execute().fetch()
expected = data.copy()
expected[['c1', 'c4', 'c10']] = expected[['c2', 'c3', 'c4']] * 2
pd.testing.assert_frame_equal(result, expected)
# test setitem into empty DataFrame
df = md.DataFrame()
df['a'] = md.Series(np.arange(1, 11), chunk_size=3)
pd.testing.assert_index_equal(df.index_value.to_pandas(),
pd.RangeIndex(10))
result = df.execute().fetch()
expected = pd.DataFrame()
expected['a'] = pd.Series(np.arange(1, 11))
pd.testing.assert_frame_equal(result, expected)
df['b'] = md.Series(np.arange(2, 12), index=pd.RangeIndex(1, 11),
chunk_size=3)
result = df.execute().fetch()
expected['b'] = pd.Series(np.arange(2, 12), index=pd.RangeIndex(1, 11))
pd.testing.assert_frame_equal(result, expected)
def test_reset_index_execution(setup):
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=['falcon', 'parrot', 'lion', 'monkey'],
columns=('class', 'max_speed'))
df = md.DataFrame(data)
df2 = df.reset_index()
result = df2.execute().fetch()
expected = data.reset_index()
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=2)
df2 = df.reset_index()
result = df2.execute().fetch()
expected = data.reset_index()
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=1)
df2 = df.reset_index(drop=True)
result = df2.execute().fetch()
expected = data.reset_index(drop=True)
pd.testing.assert_frame_equal(result, expected)
index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
('bird', 'parrot'),
('mammal', 'lion'),
('mammal', 'monkey')],
names=['class', 'name'])
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=index,
columns=('type', 'max_speed'))
df = md.DataFrame(data, chunk_size=1)
df2 = df.reset_index(level='class')
result = df2.execute().fetch()
expected = data.reset_index(level='class')
pd.testing.assert_frame_equal(result, expected)
columns = pd.MultiIndex.from_tuples([('speed', 'max'), ('species', 'type')])
data.columns = columns
df = md.DataFrame(data, chunk_size=2)
df2 = df.reset_index(level='class', col_level=1, col_fill='species')
result = df2.execute().fetch()
expected = data.reset_index(level='class', col_level=1, col_fill='species')
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=3)
df.reset_index(level='class', col_level=1, col_fill='species', inplace=True)
result = df.execute().fetch()
expected = data.reset_index(level='class', col_level=1, col_fill='species')
pd.testing.assert_frame_equal(result, expected)
# Test Series
s = pd.Series([1, 2, 3, 4], name='foo',
index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
series = md.Series(s)
s2 = series.reset_index(name='bar')
result = s2.execute().fetch()
expected = s.reset_index(name='bar')
pd.testing.assert_frame_equal(result, expected)
series = md.Series(s, chunk_size=2)
s2 = series.reset_index(drop=True)
result = s2.execute().fetch()
expected = s.reset_index(drop=True)
pd.testing.assert_series_equal(result, expected)
# Test Unknown shape
data1 = pd.DataFrame(np.random.rand(10, 3), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9])
df1 = md.DataFrame(data1, chunk_size=5)
data2 = pd.DataFrame(np.random.rand(10, 3), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
df2 = md.DataFrame(data2, chunk_size=6)
df = (df1 + df2).reset_index(incremental_index=True)
result = df.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(12))
# Inconsistent with Pandas when input dataframe's shape is unknown.
result = result.sort_values(by=result.columns[0])
expected = (data1 + data2).reset_index()
np.testing.assert_array_equal(result.to_numpy(), expected.to_numpy())
data1 = pd.Series(np.random.rand(10,), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9])
series1 = md.Series(data1, chunk_size=3)
data2 = pd.Series(np.random.rand(10,), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
series2 = md.Series(data2, chunk_size=3)
df = (series1 + series2).reset_index(incremental_index=True)
result = df.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(12))
# Inconsistent with Pandas when input dataframe's shape is unknown.
result = result.sort_values(by=result.columns[0])
expected = (data1 + data2).reset_index()
np.testing.assert_array_equal(result.to_numpy(), expected.to_numpy())
series1 = md.Series(data1, chunk_size=3)
series1.reset_index(inplace=True, drop=True)
result = series1.execute().fetch()
pd.testing.assert_index_equal(result.index,
|
pd.RangeIndex(10)
|
pandas.RangeIndex
|
"""
Ensembles of subgroup-discovery-based methods based on the
scikit-learn library.
Given the similarities between ``RandomForests`` and ``RandomSubgroups``,
the RandomForest variable nomenclature is used whenever possible.
The module structure is the following:
- The ``SubgroupPredictorBase`` base class implements a common ``fit`` method for all
the estimators in the module. This ``fit`` method calls the ``fit`` method of each
sub-estimator on random samples (with replacement, a.k.a. bootstrap) of the training set.
- The ``RandomSubgroupClassifier`` and ``RandomSubgroupRegressor`` derived
classes provide the user with concrete implementations of
the ensembles using classical Subgroup Discovery approaches
from the ``pysubgroup`` package.
``BinaryTarget`` and ``NumericTarget`` implementations are used in
``_subgroup_discovery`` as sub-estimator implementations.
Only single output problems are handled.
"""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import warnings
import pandas as pd
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.base import is_classifier, is_regressor
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_is_fitted # , check_random_state
from tqdm import trange
from randomsubgroups.pysubgrouputils import *
from randomsubgroups.subgroup import SubgroupPredictor
# This function '_get_n_samples_bootstrap' is taken from sklearn.ensemble._forest
def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
            - if float, this indicates a fraction of the total and should be in
              the interval `(0, 1)`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, numbers.Integral):
if not (1 <= max_samples <= n_samples):
msg = "`max_samples` must be in range 1 to {} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, numbers.Real):
if not (0 < max_samples < 1):
msg = "`max_samples` must be in range (0, 1) but got value {}"
raise ValueError(msg.format(max_samples))
return round(n_samples * max_samples)
msg = "`max_samples` should be int or float, but got type '{}'"
raise TypeError(msg.format(type(max_samples)))
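# Illustrative values (not from the original source): for a dataset with
# n_samples=100 the helper above resolves ``max_samples`` as follows.
#
#     _get_n_samples_bootstrap(100, None)  # -> 100 (use every sample)
#     _get_n_samples_bootstrap(100, 25)    # -> 25  (exact count)
#     _get_n_samples_bootstrap(100, 0.5)   # -> 50  (round(100 * 0.5))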
class SubgroupPredictorBase(BaseEstimator):
"""
Base class for the Random Subgroups predictors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
def __init__(self,
n_estimators=200,
max_depth=2,
max_features="auto",
bootstrap=True,
max_samples=None,
n_jobs=None,
verbose=0,
search_strategy='static',
top_n=1,
result_set_size=5,
intervals_only=False,
n_bins=5,
binning='ef'):
# Ensemble parameters
self.n_estimators = n_estimators
self.max_depth = max_depth
self.max_features = max_features
self.bootstrap = bootstrap
self.max_samples = max_samples
# General parameters
self.n_jobs = n_jobs
self.verbose = verbose
# Subgroup Discovery parameters
self.search_strategy = search_strategy
self.top_n = top_n
self.result_set_size = result_set_size
self.intervals_only = intervals_only
self.n_bins = n_bins
self.binning = binning
self.is_fitted_ = False
self.estimators_ = None
self.classes_ = None
self.n_classes_ = None
self.n_features_ = None
self.n_samples = None
self.default_prediction = None
self.column_names = None
# self.balance_bins = balance_bins
# This function 'get_max_n_features' is taken from sklearn.tree._classes
def get_max_n_features(self):
if isinstance(self.max_features, str):
if self.max_features == "auto":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError("Invalid value for max_features. "
"Allowed string values are 'auto', "
"'sqrt' or 'log2'.")
elif self.max_features is None:
max_features = None
elif isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features = max_features
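    # Illustrative resolution of ``max_features`` (hypothetical values, not
    # from the original source), assuming self.n_features_ == 16:
    #   "auto" or "sqrt" -> max(1, int(np.sqrt(16))) == 4
    #   "log2"           -> max(1, int(np.log2(16))) == 4
    #   None             -> None (all features are considered)
    #   6                -> 6
    #   0.25             -> max(1, int(0.25 * 16)) == 4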
def _check_estimator_output(self, result):
if len(result.to_descriptions()) > 0:
if len(result.to_descriptions()) > self.top_n:
                # Randomly select from the top_n subgroups in the list result.to_descriptions()
n = np.random.choice(self.top_n, 1, replace=False)[0]
# return [result.to_descriptions()[i] for i in n]
elif self.result_set_size > 1:
n = np.random.choice(len(result.to_descriptions()))
else:
n = 0
return [result.to_descriptions()[n]]
else:
return None
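    # Illustrative behaviour of the selection above (hypothetical numbers):
    # suppose result.to_descriptions() has 5 entries.
    #   top_n=3                      -> one of the first 3 entries, at random
    #   top_n=10, result_set_size=5  -> any of the 5 entries, at random
    #   top_n=10, result_set_size=1  -> always the first entry
    #   empty result set             -> None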
def _apply(self, xy):
"""
Apply Subgroup Discovery to ``xy`` and return the subgroup predictor.
Parameters
----------
xy : {DataFrame} of shape (n_samples, n_features+1)
It includes ``n_features`` columns and one column with the target.
Returns
-------
        subgroup_predictor : list or None
            The selected subgroup description(s) returned by the
            sub-estimator's ``_subgroup_discovery``, or ``None`` when no
            subgroup was found.
"""
if self.bootstrap:
n_samples_bootstrap = _get_n_samples_bootstrap(
self.n_samples, self.max_samples
)
xy = xy.sample(n=n_samples_bootstrap, replace=True)
if self.search_strategy in ['static', 'dynamic']:
self.ps_algorithm = LightBestFirstSearch(max_features=self.max_features, n_bins=self.n_bins,
intervals_only=self.intervals_only,
specialization=self.search_strategy,
binning=self.binning)
subgroup_predictor = self._subgroup_discovery(xy)
return subgroup_predictor
elif self.search_strategy == 'bestfirst':
self.ps_algorithm = ps.BestFirstSearch()
elif self.search_strategy == 'beam':
self.ps_algorithm = ps.BeamSearch()
elif self.search_strategy == 'apriori':
self.ps_algorithm = ps.Apriori()
else:
msg = "Unknown search strategy. Available options are: " \
"['static', 'dynamic', 'beam', 'apriori']"
raise ValueError(msg)
subset_columns = np.append(np.random.choice(self.n_features_, size=self.max_features, replace=False),
self.n_features_)
subgroup_predictor = self._subgroup_discovery(xy.iloc[:, subset_columns].copy())
return subgroup_predictor
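    # For the strategies delegated to ``pysubgroup`` above, a random feature
    # projection is used. Hypothetical example (values for illustration only):
    # with self.n_features_ == 10 and self.max_features == 3, subset_columns
    # could be array([7, 2, 5, 10]) -- three random feature columns plus
    # column index 10, the 'target' column appended to ``xy`` in ``fit``.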
def fit(self, x, y):
"""
Build an ensemble of subgroups for prediction from the training set (X, y).
Parameters
----------
x : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,).
The target values (class labels in classification, real numbers in regression).
Returns
-------
self : object
"""
# random_state = check_random_state(self.random_state)
if is_classifier(self):
check_classification_targets(y)
# y = np.copy(y)
# Store the classes seen during fit and encode y
self.classes_, y_encoded = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
y = y_encoded
elif is_regressor(self):
self.default_prediction = np.mean(y)
# Check that x is a dataframe, if not then
# creates a dataframe to be used in the Subgroup Discovery task
if not isinstance(x, pd.DataFrame):
self.column_names = ['Col' + str(i) for i in range(0, x.shape[1])]
x = pd.DataFrame.from_records(x, columns=self.column_names)
else:
self.column_names = x.columns
xy = x.copy()
xy['target'] = y
self.n_samples, self.n_features_ = x.shape
self.get_max_n_features()
model_desc = Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky')(
delayed(self._apply)(xy.copy()) for _ in (trange(self.n_estimators)
if self.verbose else range(self.n_estimators)))
self.estimators_ = [SubgroupPredictor.from_dict(sg) for subgroup in model_desc
if subgroup is not None for sg in subgroup]
n = self.n_estimators - len(self.estimators_)
if n > 0 and self.verbose:
print("Could not find {} out of {} estimators.".format(n, self.n_estimators))
self.is_fitted_ = True
return self
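    # Minimal usage sketch (illustrative only, not from the original source).
    # It assumes the derived ``RandomSubgroupClassifier`` described in the
    # module docstring is importable from this package and follows the usual
    # scikit-learn estimator interface:
    #
    #     from sklearn.datasets import load_breast_cancer
    #     from randomsubgroups import RandomSubgroupClassifier
    #
    #     X, y = load_breast_cancer(return_X_y=True, as_frame=True)
    #     clf = RandomSubgroupClassifier(n_estimators=50, max_depth=2)
    #     clf.fit(X, y)
    #     clf.show_models()  # prints one subgroup description per estimator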
def show_models(self):
# Check if fit had been called
check_is_fitted(self)
if is_classifier(self):
sorted_list = [[self.classes_[estimator.target], estimator] for estimator in
sorted(self.estimators_, key=lambda e: e.target)]
elif is_regressor(self):
sorted_list = [[estimator.target, estimator] for estimator in
sorted(self.estimators_, key=lambda e: e.target)]
else:
msg = "Unknown type of model. Must be 'regressor' or 'classifier'"
raise ValueError(msg)
[print(f"Target: {target}; Model: {estimator}") for target, estimator in sorted_list]
return
|
pd.DataFrame(sorted_list, columns=["Target", "Model"])
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
# api: pandas.Series
import pandas as pd
import numpy as np
from plotly.offline import iplot
import plotly.io as pio
from scanpy.plotting._tools.scatterplots import _get_palette
from anndata import AnnData
from typing import Union
def river_plot(
adata: AnnData,
source: str,
target: str,
cell_number: bool = True,
title: str = 'River plot (Sankey Diagram)',
save: Union[str, None] = None,
scale: int = 1
) -> None:
"""
Parameters
----------
adata:
Annotated data matrix.
source
Obs variable containing source annotation.
target
Obs variable containing target annotation.
cell_number
If True, prints the number of cells in each category; otherwise the counts are not displayed.
title
Title for the plot.
save
Save the plot.
scale
Above 1 it increases the resolution; below 1 it reduces it. Only matters when saving the plot.
"""
adata.obs[source] = adata.obs[source].astype('str').astype('category')
adata.obs[target] = adata.obs[target].astype('str').astype('category')
df_nodes, df_links = __tool_sankey(adata, source, target, cell_number=cell_number)
__plot_sankey(df_nodes, df_links,
title=title,
save=save,
scale=scale)
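# A minimal usage sketch, assuming an AnnData object that carries two categorical
# .obs columns; the keys 'louvain' and 'cell_type' below are hypothetical
# placeholders, not names taken from this module.
def _river_plot_demo(adata: AnnData) -> None:
    """Sketch: show how cluster labels flow into curated cell-type labels."""
    river_plot(
        adata,
        source='louvain',       # assumed clustering annotation in adata.obs
        target='cell_type',     # assumed curated annotation in adata.obs
        cell_number=True,       # include the number of cells in each category
        title='Clusters vs. cell types',
        save=None,              # set to e.g. 'river.png' to export the figure
        scale=1,
    )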
def __tool_sankey(adata, source, target, cell_number=True):
# extract key_infos in adata
key_infos = pd.crosstab(adata.obs[target], adata.obs[source])
# NODES
# transform key_infos into the nodes df
nodes = [['ID', 'Label', 'Color']]
if not cell_number:
label_list = key_infos.columns.tolist() + key_infos.index.tolist()
else:
target_cell_nb = pd.crosstab(adata.obs[target], adata.obs[target], margins=True)
source_cell_nb = pd.crosstab(adata.obs[source], adata.obs[source], margins=True)
# api: pandas.crosstab
import logging
from pathlib import Path
import glob
import pandas as pd
from src.defaults import PROJECT_DIR, plant_fuels, units
import numpy as np
class Transformer:
def __init__(self, input_path, output_path, start_year, end_year, benchmark_years):
""""""
self.input_path = Path(input_path)
self.output_path = Path(output_path)
self.start_year = int(start_year)
self.end_year = int(end_year)
self.benchmark_years = int(benchmark_years)
self.folder = str(self.input_path).split("/")[-1]
self.raw_tables = self.get_raw_data()
self.maximum_capacity = pd.read_csv(
str(input_path)
+ "../../../interim/maximum_capacity/proportion_technology_demand.csv"
)
self.electricity_demand = pd.read_csv(
str(input_path) + "../../../interim/electricity_demand/demand.csv"
)
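# Typical driver code for this class is roughly the following sketch; the region
# folder, output path and year arguments are assumptions, not values taken from
# the repository:
#
#     transformer = Transformer(
#         input_path="data/raw/starter_kits/Kenya",
#         output_path="data/processed/Kenya",
#         start_year=2020, end_year=2050, benchmark_years=5,
#     )
#     transformer.create_muse_dataset()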
def create_muse_dataset(self):
"""
Imports the starter kits datasets and converts them into a form used
for MUSE.
"""
logger = logging.getLogger(__name__)
logger.info("Converting raw data for {}.".format(self.folder))
scenarios = ["base", "net-zero", "fossil-fuel"]
scenarios_data = {}
for scenario in scenarios:
muse_data = {}
muse_data["input"] = {
"GlobalCommodities": self.generate_global_commodities()
}
muse_data["input"]["Projections"] = self.generate_projections()
muse_data["technodata"] = {"Agents": self.generate_agents_file()}
muse_data["technodata"]["power"] = {
"ExistingCapacity": self.create_existing_capacity_power()
}
muse_data["technodata"]["power"][
"Technodata"
] = self.convert_power_technodata()
muse_data["technodata"]["power"]["CommIn"] = self.get_power_comm_in(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["power"]["CommOut"] = self.get_comm_out(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["power"][
"TechnodataTimeslices"
] = self.get_technodata_timeslices(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["oil"] = {
"Technodata": self.convert_oil_technodata()
}
muse_data["technodata"]["oil"]["CommIn"] = self.get_oil_comm_in(
technodata=muse_data["technodata"]["oil"]["Technodata"]
)
muse_data["technodata"]["oil"]["CommOut"] = self.get_comm_out(
technodata=muse_data["technodata"]["oil"]["Technodata"]
)
muse_data["technodata"]["oil"][
"ExistingCapacity"
] = self.create_empty_existing_capacity(self.raw_tables["Table5"])
if self.electricity_demand["RegionName"].str.contains(self.folder).any():
self.electricity_demand = self.electricity_demand[
self.electricity_demand.RegionName == self.folder
]
muse_data["technodata"]["preset"] = self.generate_preset()
muse_data["technodata"]["power"][
"Technodata"
] = self.modify_max_capacities(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["power"]["Technodata"] = self.create_scenarios(
scenario, muse_data["technodata"]["power"]["Technodata"]
)
scenarios_data[scenario] = muse_data
logger.info("Writing processed data for {}".format(self.folder))
self.write_results(scenarios_data)
def get_raw_data(self):
"""
Imports all starter kits data into pandas.
"""
table_directories = glob.glob(str(self.input_path / Path("*.csv")))
tables = {}
for table_directory in table_directories:
table_name = table_directory.split("/")[-1].split("_")[0]
tables[table_name] = pd.read_csv(table_directory)
return tables
def write_results(self, results_data):
"""
Writes all the processed starter kits to CSV files for use in MUSE.
"""
import os
for scenario in results_data:
output_path_scenario = self.output_path / Path(scenario)
if (
not os.path.exists(output_path_scenario)
and type(results_data[scenario]) is dict
):
os.makedirs(output_path_scenario)
for folder in results_data[scenario]:
output_path_folder = output_path_scenario / Path(folder)
for sector in results_data[scenario][folder]:
output_path = output_path_scenario / Path(folder) / Path(sector)
if (
not os.path.exists(output_path)
and type(results_data[scenario][folder][sector]) is dict
):
os.makedirs(output_path)
elif not os.path.exists(output_path_folder):
os.makedirs(output_path_folder)
if type(results_data[scenario][folder][sector]) is pd.DataFrame:
results_data[scenario][folder][sector].to_csv(
str(output_path) + ".csv", index=False
)
else:
for csv in results_data[scenario][folder][sector]:
results_data[scenario][folder][sector][csv].to_csv(
str(output_path) + "/" + csv + ".csv", index=False
)
def generate_agents_file(self):
agents = pd.read_csv("data/external/muse_data/default/technodata/Agents.csv")
agents["RegionName"] = self.folder
agents["Objsort1"] = "True"
return agents
def generate_global_commodities(self):
commodities = self.raw_tables["Table7"]
commodities["Commodity"] = commodities["Fuel"]
commodities = commodities.drop(columns="Parameter")
commodities["Fuel"] = (
commodities["Fuel"]
.str.lower()
.str.replace("light fuel oil", "LFO")
.str.replace("heavy fuel oil", "HFO")
.str.replace("crude oil", "crude_oil")
.str.replace("natural gas", "gas")
)
commodities = commodities.rename(
columns={"Value": "CommodityEmissionFactor_CO2", "Fuel": "CommodityName"}
)
commodities["CommodityType"] = "energy"
commodities["HeatRate"] = 1
commodities["Unit"] = "kg CO2/GJ"
CO2_row = {
"Commodity": "CO2fuelcombustion",
"CommodityType": "Environmental",
"CommodityName": "CO2f",
"CommodityEmissionFactor_CO2": 0,
"HeatRate": 1,
"Unit": "kt",
}
commodities = commodities.append(CO2_row, ignore_index=True)
muse_commodities = pd.read_csv(
"data/external/muse_data/default/input/GlobalCommodities.csv"
)
commodities = commodities.reindex(muse_commodities.columns, axis=1)
additional_items = [
"ProcessName",
"RegionName",
"Time",
"Level",
"CO2f",
"crude_oil",
"biomass",
"coal",
"LFO",
"HFO",
"gas",
]
fuels = units.copy()
for item in additional_items:
fuels.pop(item)
for commodity, _ in fuels.items():
entries = [commodity, "energy", commodity, 0, 1, "kg CO2/GJ"]
new_row = {
column: entry
for column, entry in zip(list(commodities.columns), entries)
}
commodities = commodities.append(new_row, ignore_index=True)
return commodities
def generate_projections(self):
from src.defaults import units
costs = self.raw_tables["Table6"]
costs["Value"] = costs["Value"]
import_costs = costs[~costs["Commodity"].str.contains("Extraction")].copy()
import_costs["Commodity"] = import_costs["Commodity"].str.replace("Imports", "")
import_costs["Commodity"] = import_costs["Commodity"].str.replace("Natural", "")
import_costs["Commodity"] = import_costs["Commodity"].str.lower()
import_costs["Commodity"] = import_costs["Commodity"].str.replace(
"light fuel oil", "LFO"
)
import_costs["Commodity"] = import_costs["Commodity"].str.replace(
"heavy fuel oil", "HFO"
)
import_costs["Commodity"] = import_costs["Commodity"].str.replace(" ", "")
import_costs["Commodity"] = import_costs["Commodity"].str.replace(
"crudeoil", "crude_oil"
)
fuels = list(pd.unique(import_costs.Commodity))
projections = import_costs.pivot_table(
index="Year", columns="Commodity", values="Value"
)
projections["RegionName"] = self.folder
projections["Attribute"] = "CommodityPrice"
projections = projections.reset_index()
projections = projections.rename(columns={"Year": "Time"})
col_order = ["RegionName", "Attribute", "Time"] + fuels
projections = projections[col_order]
commodities = units.copy()
for item in [
"ProcessName",
"RegionName",
"Time",
"Level",
"crude_oil",
"biomass",
"coal",
"LFO",
"HFO",
"gas",
]:
commodities.pop(item)
for key, _ in commodities.items():
# if key == "CO2f" or key == "electricity":
if key == "electricity":
projections[key] = 1
elif key == "uranium":
projections[
key
] = 1.764 # http://www.world-nuclear.org/uploadedfiles/org/info/pdf/economicsnp.pdf
else:
projections[key] = 0
units = {"RegionName": ["Unit"], "Attribute": ["-"], "Time": ["Year"]}
for commodity in fuels + list(commodities.keys()):
if commodity != "CO2f":
units[commodity] = ["MUS$2020/PJ"]
else:
units[commodity] = ["MUS$2020/kt"]
units_row = pd.DataFrame.from_dict(units, orient="columns")
projections_out = units_row.append(projections)
return projections_out
def create_existing_capacity_power(self):
"""
Calculates the existing power capacity from Table1 from the starter kits,
and transforms them into an ExistingCapacity dataframe for MUSE.
"""
installed_capacity = self.raw_tables["Table1"]
installed_capacity = installed_capacity.rename(
columns={"Power Generation Technology": "Technology"}
)
installed_capacity["Technology"].replace(
"Off-grid Solar PV", "Solar PV (Distributed with Storage)", inplace=True
)
latest_installed_capacity = installed_capacity[
installed_capacity.Year == installed_capacity["Year"].max()
]
technoeconomics = self.raw_tables["Table2"]
installed_capacity_cf = latest_installed_capacity.merge(
technoeconomics[technoeconomics.Parameter == "Average Capacity Factor"],
on="Technology",
)
installed_capacity_cf = installed_capacity_cf.rename(
columns={
"Value_y": "average_capacity_factor",
"Value_x": "estimated_installed_capacity_MW",
}
)
installed_capacity_cf = installed_capacity_cf.drop(
columns=["Parameter_y", "Parameter_x"]
)
installed_capacity_cf["estimated_installed_capacity_PJ_y"] = (
installed_capacity_cf.estimated_installed_capacity_MW * 24 * 365 * 0.0000036
)
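# Unit check: 1 MW running for a full year gives 24 * 365 = 8760 MWh, and
# 1 MWh = 3.6 GJ = 3.6e-6 PJ, so the factor 24 * 365 * 0.0000036 (~0.0315)
# converts installed MW into PJ/y at full utilization.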
installed_capacity_pj_y = installed_capacity_cf.drop(
columns=["estimated_installed_capacity_MW", "average_capacity_factor"]
)
installed_capacity_pj_y_wide = installed_capacity_pj_y.pivot(
index="Technology",
columns="Year",
values="estimated_installed_capacity_PJ_y",
).reset_index()
installed_capacity_pj_y_wide.insert(1, "RegionName", self.folder)
installed_capacity_pj_y_wide.insert(2, "Unit", "PJ/y")
muse_installed_capacity = installed_capacity_pj_y_wide.rename(
columns={"Technology": "ProcessName"}
)
muse_installed_capacity = muse_installed_capacity.rename(columns={2018: 2020})
unknown_cols = list(
range(
self.start_year + self.benchmark_years,
self.end_year,
self.benchmark_years,
)
)
for col in unknown_cols:
muse_installed_capacity[col] = (
muse_installed_capacity[col - self.benchmark_years] * 0.7
)
return muse_installed_capacity
def create_empty_existing_capacity(self, technodata):
"""
Creates an existing capacity for MUSE, where no data is available.
"""
techno = technodata
techs = list(pd.unique(techno.Technology))
existing_capacity_dict = {}
all_years = list(range(self.start_year, self.end_year, self.benchmark_years))
for tech in techs:
existing_capacity_dict[tech] = [tech, self.folder, "PJ/y"] + [0] * (
len(all_years)
)
existing_capacity = pd.DataFrame.from_dict(
existing_capacity_dict,
orient="index",
columns=["ProcessName", "RegionName", "Unit"] + all_years,
)
existing_capacity = existing_capacity.reset_index(drop=True)
existing_capacity[2020] = 100
unknown_cols = list(
range(
self.start_year + self.benchmark_years,
self.end_year,
self.benchmark_years,
)
)
for col in unknown_cols:
existing_capacity[col] = (
existing_capacity[col - self.benchmark_years] * 0.96
)
return existing_capacity
def convert_power_technodata(self):
"""
Converts Table2 from the starter kits into a Power Technodata file for MUSE.
"""
technoeconomic_data = self.raw_tables["Table2"]
growth_limits_fetched = self.raw_tables["Table8"]
growth_limits = growth_limits_fetched.copy()
# match the "(MW)" / "(Twh/yr)" suffixes literally (regex=False), as in the str.replace calls below
growth_limits.loc[growth_limits["Technology"].str.contains("(MW)", regex=False), "Value"] = (
growth_limits.loc[growth_limits["Technology"].str.contains("(MW)", regex=False), "Value"]
* 24
* 365
* 3.6e-6
)
growth_limits.loc[
growth_limits["Technology"].str.contains("(Twh/yr)", regex=False), "Value"
] = (
growth_limits.loc[
growth_limits["Technology"].str.contains("(Twh/yr)", regex=False), "Value"
]
* 3.6
)
growth_limits["Technology"] = growth_limits.Technology.str.replace(
"Geothermal (MW)", "Geothermal Power Plant", regex=False
)
growth_limits["Technology"] = growth_limits.Technology.str.replace(
"Small Hydropower (MW)", "Small Hydropower Plant (<10MW)", regex=False
)
growth_limits["Technology"] = growth_limits.Technology.str.replace(
"Hydropower (MW)", "Large Hydropower Plant (Dam)", regex=False
)
try:
large_hydropower_limit = growth_limits[
growth_limits.Technology == "Large Hydropower Plant (Dam)"
].Value.values[0]
except:
large_hydropower_limit = 0
medium_hydropower_row = {
"Technology": "Medium Hydropower Plant (10-100MW)",
"Parameter": "Estimated Renewable Energy Potential",
"Value": large_hydropower_limit,
}
growth_limits = growth_limits.append(medium_hydropower_row, ignore_index=True)
muse_technodata = pd.read_csv(
PROJECT_DIR
/ Path("data/external/muse_data/default/technodata/power/Technodata.csv")
)
technoeconomic_data_wide = technoeconomic_data.pivot(
index="Technology", columns="Parameter", values="Value"
)
technoeconomic_data_wide = self._insert_constant_columns(
technoeconomic_data_wide, "energy", "electricity"
)
technoeconomic_data_wide = technoeconomic_data_wide.reset_index()
technoeconomic_data_wide = technoeconomic_data_wide.set_index("Technology")
growth_limits = growth_limits.reset_index()
growth_limits = growth_limits.rename(columns={"Value": "TotalCapacityLimit"})
growth_limits = growth_limits.set_index("Technology")
technoeconomic_data_wide.update(growth_limits)
technoeconomic_data_wide = technoeconomic_data_wide.reset_index()
technoeconomic_data_wide_named = technoeconomic_data_wide.rename(
columns={
"Average Capacity Factor": "UtilizationFactor",
"Capital Cost ($/kW in 2020)": "cap_par",
"Fixed Cost ($/kW/yr in 2020)": "fix_par",
"Operational Life (years)": "TechnicalLife",
"Technology": "ProcessName",
"Efficiency ": "efficiency",
}
)
technoeconomic_data_wide_named["Fuel"] = technoeconomic_data_wide_named[
"ProcessName"
].map(plant_fuels)
plants = list(pd.unique(technoeconomic_data_wide_named.ProcessName))
plant_sizes = self._generate_scaling_size(plants)
technoeconomic_data_wide_named["ScalingSize"] = technoeconomic_data_wide_named[
"ProcessName"
].map(plant_sizes)
technoeconomic_data_wide_named = technoeconomic_data_wide_named.apply(
pd.to_numeric, errors="ignore"
)
projected_capex = self.raw_tables["Table3"]
if "Table10" in self.raw_tables:
projected_fixed_costs = self.raw_tables["Table10"]
projected_fixed_costs = projected_fixed_costs.melt(id_vars="Technology")
projected_fixed_costs = projected_fixed_costs.rename(
columns={
"Technology": "ProcessName",
"variable": "Time",
"value": "fix_par",
}
)
projected_fixed_costs["Time"] = projected_fixed_costs["Time"].astype(int)
projected_capex = projected_capex.rename(
columns={"Technology": "ProcessName", "Year": "Time", "Value": "cap_par"}
)
projected_capex = projected_capex.drop(columns="Parameter")
if "Table10" in self.raw_tables:
projected_costs = pd.merge(
projected_capex,
projected_fixed_costs,
on=["ProcessName", "Time"],
how="left",
)
else:
projected_costs = projected_capex
projected_costs["fix_par"] = np.nan
projected_capex_with_unknowns = pd.merge(
projected_costs[["ProcessName", "Time"]],
technoeconomic_data_wide_named[["ProcessName"]],
how="cross",
)
with_years = (
projected_capex_with_unknowns.drop(columns="ProcessName_x")
.drop_duplicates()
.rename(columns={"ProcessName_y": "ProcessName"})
)
filled_years = pd.merge(with_years, projected_costs, how="outer")
# api: pandas.merge
import pandas as pd
import glob
import yaml
from os.path import join
from . import serialization, history
def read_data(*args, group=False, **kwargs):
iterable = _read_data(*args, **kwargs)
if group:
return group_trials(iterable)
else:
return list(iterable)
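# Illustrative call (the glob pattern below is an assumption): load every trial
# found in folders matching the pattern and group them by configuration file.
#
#     grouped = read_data("soil_output/my-experiment*", group=True)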
def _read_data(pattern, *args, from_csv=False, process_args=None, **kwargs):
if not process_args:
process_args = {}
for folder in glob.glob(pattern):
config_file = glob.glob(join(folder, '*.yml'))[0]
config = yaml.load(open(config_file), Loader=yaml.SafeLoader)
df = None
if from_csv:
for trial_data in sorted(glob.glob(join(folder,
'*.environment.csv'))):
df = read_csv(trial_data, **kwargs)
yield config_file, df, config
else:
for trial_data in sorted(glob.glob(join(folder, '*.sqlite'))):
df = read_sql(trial_data, **kwargs)
yield config_file, df, config
def read_sql(db, *args, **kwargs):
h = history.History(db_path=db, backup=False, readonly=True)
df = h.read_sql(*args, **kwargs)
return df
def read_csv(filename, keys=None, convert_types=False, **kwargs):
'''
Read a CSV in canonical form: ::
<agent_id, t_step, key, value, value_type>
'''
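    # An environment CSV in this canonical form looks roughly like the following
    # (illustrative rows; the header mirrors the column order described above and
    # may or may not be present in the actual files):
    #
    #     agent_id,t_step,key,value,value_type
    #     a0,0,state,neutral,str
    #     a0,1,state,infected,str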
df = pd.read_csv(filename)
# api: pandas.read_csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
names=None,
cmap=plt.cm.Blues,
ax=None):
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
if ax is None:
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel=names[0],
xlabel=names[1])
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
return ax
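# A minimal usage sketch; the label arrays and class names below are made-up
# illustration data, not taken from the original project.
def _plot_confusion_matrix_demo():
    """Sketch: plot a normalized confusion matrix for three toy classes."""
    y_true = np.array([0, 1, 1, 2, 2, 2])
    y_pred = np.array([0, 1, 2, 2, 2, 1])
    ax = plot_confusion_matrix(
        y_true, y_pred,
        classes=['class A', 'class B', 'class C'],
        normalize=True,
        title='Normalized confusion matrix',
        names=('True label', 'Predicted label'),  # used as (ylabel, xlabel)
    )
    plt.show()
    return ax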
def plot_matrix(cm, xlabels=None, ylabels=None, title=None, cmap=plt.cm.Blues):
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.set(
xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
xticklabels=xlabels,
yticklabels=ylabels,
title=title
)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
# fig.tight_layout()
return ax
def multiclass_dataframe(task):
df = []
for annotation in task.annotations:
data = {
'document': annotation.document_id,
'label': annotation.type_id,
'annotator': annotation.annotator,
'created': annotation.created
}
df.append(data)
df = pd.DataFrame(df)
# api: pandas.DataFrame
from Bio import SeqIO
import pandas as pd
import numpy as np
import subprocess
import os
import re
import time
import random
import itertools
import gzip
import json
import platform
import ast
import multiprocessing as mp
from multiprocessing import Manager
from os.path import expanduser
from importlib.machinery import SourceFileLoader
from scipy.stats import binom
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
def barcode_errors(sorted_sampid):
base_map = {"A":1, "T":2, "G":3, "C":4, "-":0}
for sampid in sorted_sampid:
# get mapped reads with true aln pos
mapped_reads_df = pd.read_csv("./results/barcode_sort/mapped_%s_true_nucpos.csv"%(sampid)).set_index(["amplicon_nr", "barcode"])
for amplicon_nr in mapped_reads_df.index.get_level_values(0):
amp_mapped_reads_df = mapped_reads_df.loc[amplicon_nr]
# for each unique barcode in amplicon
for barcode in set(amp_mapped_reads_df.index):
bc_amp_mapped_reads_df = amp_mapped_reads_df.loc[barcode]
# make mini map_nuc_results
map_nuc_results = {}
#if isinstance(bc_amp_mapped_reads_df, pd.Series):
print (bc_amp_mapped_reads_df)
aln_block = np.zeros((len(bc_amp_mapped_reads_df), bc_amp_mapped_reads_df["end_nucaln_pos_plus1"].max()))
for r, (barcode, row) in enumerate(bc_amp_mapped_reads_df.iterrows()):
print (aln_block[(r,)])
print (list(map(lambda res:base_map[res], list(row["seq"]))))
raise Exception
return
def assess_perr(var_freq, depth, q, err_tol):
i = 0
pe = 10**(-q/10)
pErr = 1.
while i <= var_freq:
pErr -= binom.pmf(i, depth, pe)
i += 1
if (np.isnan(pErr)):
raise Exception('pErr = nan; check freq/depth/q = {}, {}, {}'.format(var_freq, depth, q))
return pErr < err_tol
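# A small numerical illustration (not part of the original pipeline): the loop in
# assess_perr computes P(X > var_freq) for X ~ Binomial(depth, pe) with
# pe = 10**(-q/10), i.e. the binomial survival function. The numbers below are
# made up to show the intended use.
def _assess_perr_example():
    """Sketch: could 10 variant reads out of 1000 at Q30 be sequencing error alone?"""
    var_freq, depth, q, err_tol = 10, 1000, 30, 0.05
    tail = binom.sf(var_freq, depth, 10 ** (-q / 10))  # same quantity as pErr above
    passes_filter = assess_perr(var_freq, depth, q, err_tol)
    return tail, passes_filter  # tail is tiny (~1e-8) here, so passes_filter is True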
def variant_calling_worker(sampid, codon_table, all_bases, variant_call_df, sorted_refnames, base_qual_threshold, min_cov, min_var_prop, gene_to_proteinorf, err_tol, alnrefnun_to_truerefnum, ha_numbering_conversion, HAnum_subtype, min_var_freq):
# parse map_nuc_results and map_codon_results
try:
map_nuc_results = pd.read_csv('./results/map_nuc_results_{}.csv'.format(sampid), keep_default_na=False).set_index(['Gene', 'Position']).sort_index()
except:
#print ('WARNING: No mapped reads for %s.'%sampid)
return
map_codon_results = pd.read_csv('./results/map_codon_results_{}.csv'.format(sampid), keep_default_na=False).set_index(['Gene', 'Position']).sort_index()
for gene in sorted_refnames:
# section map_nuc_results for each gene segment
try:
gene_map_nuc_results = map_nuc_results.loc[gene].copy().sort_index()
except:
#print ('WARNING: No mapped reads for gene segment %s for %s.'%(gene, sampid))
continue
## -------------------------
# Variant calling
## -------------------------
# only consider nucleotide above minimum coverage
gene_map_nuc_results = gene_map_nuc_results[gene_map_nuc_results['Coverage']>=min_cov]
if len(gene_map_nuc_results) == 0:
# lack of coverage for gene segment
#print ('WARNING: Lack of coverage for gene segment %s for %s.'%(gene, sampid))
continue
# filter for nucleotide positions which are not conserved
polymorphic_nuc_positions = list(gene_map_nuc_results[gene_map_nuc_results[all_bases].max(axis=1)!=gene_map_nuc_results['Coverage']].index)
if len(polymorphic_nuc_positions) == 0: # no polymorphic nucleotide positions
continue
# first consolidate all nucleotide variants passing quality filters
nucpos_to_varinfo = {}
for nucpos in polymorphic_nuc_positions:
# nucpos should be within cdr
try:
gene_coord_nucrow = gene_to_proteinorf.loc[gene, nucpos]
except:
# non-coding region ignored
continue
# get map_nuc_results for nucpos in gene
nucpos_gene_map_nuc_results = gene_map_nuc_results.loc[nucpos]
# consensus base
nuc_consensus = nucpos_gene_map_nuc_results['Consensus']
nuc_consensus_freq = nucpos_gene_map_nuc_results[nuc_consensus]
# nuc_depth for position
nuc_depth = nucpos_gene_map_nuc_results['Coverage']
# for any base > 0 counts
for base in (nucpos_gene_map_nuc_results[all_bases][nucpos_gene_map_nuc_results[all_bases]>0].index):
if base == nuc_consensus:
continue
# get nucleotide frequency for each variant base
var_nuc_freq = nucpos_gene_map_nuc_results[base]
# check that it is above min frequency
if var_nuc_freq < min_var_freq:
continue
# check that it is above min proportion
var_nuc_prop = var_nuc_freq/nuc_depth
if var_nuc_prop < min_var_prop:
continue
# check if var_nuc_freq could be attributed to error
if assess_perr(var_nuc_freq, nuc_depth, base_qual_threshold, err_tol) == False:
continue
# save variants
try:
nucpos_to_varinfo[nucpos]['variants'][base] = {'var_nuc_freq':var_nuc_freq, 'var_nuc_prop':var_nuc_prop}
except:
# ...alongside other information in order of consensus, nuc_depth
nucpos_to_varinfo[nucpos] = {'nuc_consensus':nuc_consensus,
'nuc_consensus_freq':nuc_consensus_freq,
'nuc_depth':nuc_depth,
'variants':{base:{'var_nuc_freq':var_nuc_freq, 'var_nuc_prop':var_nuc_prop}}}
for nucpos in nucpos_to_varinfo.keys():
variant_info = nucpos_to_varinfo[nucpos]
# nucleotide consensus and depth
nuc_consensus = variant_info['nuc_consensus']
nuc_consensus_freq = variant_info['nuc_consensus_freq']
nuc_depth = variant_info['nuc_depth']
# get protein coordinates for nuc_pos
try:
gene_coord_nucrow = gene_to_proteinorf.loc[gene, nucpos]
except:
continue
# for each protein that the nucpos in the gene encode
for protein in gene_coord_nucrow.index:
# position of protein that nucleotide translates to
aapos = gene_coord_nucrow.loc[protein]['aa']
# convert HA numbering to desired subtype numbering
HA_num_type = None
if protein == "HA" and isinstance(ha_numbering_conversion, pd.DataFrame):
if aapos > ha_numbering_conversion.index.max():
continue
else:
ha_aapos = ha_numbering_conversion.loc[aapos, HAnum_subtype]
if pd.isna(ha_numbering_conversion.loc[aapos, HAnum_subtype]):
HA_num_type = ha_numbering_conversion.index.name
else:
aapos = int(ha_aapos)
HA_num_type = HAnum_subtype
# codon position of nucleotide
frame = gene_coord_nucrow.loc[protein]['frame']
# get starting position of codon
codon_start_pos = nucpos-(frame-1)
# expected aa consensus based on nucleotide consensus
try:
expected_aa_consensus = codon_table[''.join(list(gene_map_nuc_results.loc[codon_start_pos:codon_start_pos+2]['Consensus']))]
except:
# not enough reads on nucleotide sites constituting the codon
for base, curr_var_dict in variant_info['variants'].items():
var_nuc_freq = curr_var_dict['var_nuc_freq']
var_nuc_prop = curr_var_dict['var_nuc_prop']
print_nuc_pos = alnrefnun_to_truerefnum[gene][nucpos] if alnrefnun_to_truerefnum != None else nucpos
variant_call_df.append({'sampid':sampid,
'gene':gene,
'nucpos':print_nuc_pos, 'nuc_var':base, 'nuc_prop':var_nuc_prop, 'nuc_freq':var_nuc_freq,
'nuc_consensus':nuc_consensus, 'nuc_consensus_freq':nuc_consensus_freq, 'nuc_coverage':nuc_depth,
'protein':protein,
'aapos':aapos, "HA_num_type":HA_num_type, 'aa_var':None, 'aa_prop':None, 'aa_freq':None,
'expected_aa_consensus':None, 'aa_consensus':None, 'aa_consensus_freq':None, 'aa_coverage':None,
'codon_pos':frame, 'codon':None, 'nonsyn':None})
continue
# get the actual codon reads
codon_map_codon_results = map_codon_results.loc[gene, codon_start_pos]
codon_map_codon_results = codon_map_codon_results[codon_map_codon_results.gt(0)]
# get depth of aa based on sum of codons
aa_depth = codon_map_codon_results.sum()
# ignore premature stop codon
stop_codon_aapos = max(gene_to_proteinorf.xs(protein, level=2)['aa'])
# only allowed nucpos to have stop codons
stop_codon_nucpos = list(gene_to_proteinorf.xs(protein, level=2)[gene_to_proteinorf.xs(protein, level=2)['aa'] == stop_codon_aapos].index.get_level_values(1))
# only consider variant codons where the base at the site in question is a significant variant or the consensus
variant_codon_list = [codon for codon in codon_map_codon_results.index if codon[frame-1] in [nuc_consensus]+list(variant_info['variants'].keys())]
if nucpos not in stop_codon_nucpos:
# remove premature stop codons
variant_codon_list = [codon for codon in variant_codon_list if codon_table[codon] != '*']
# continue if no more variants left
if len(variant_codon_list) == 0:
continue
# filter out any potentially errorneous nucleotide variants in other codon sites
for other_frame in range(1, 4):
if other_frame == frame:
continue
other_nucpos = nucpos+(other_frame-frame)
try:
allowed_other_nuc_variants = [nucpos_to_varinfo[other_nucpos]['nuc_consensus']] + list(nucpos_to_varinfo[other_nucpos]['variants'].keys())
except:
# other_nucpos has no significant variants
allowed_other_nuc_variants = [gene_map_nuc_results.loc[other_nucpos, 'Consensus']]
variant_codon_list = [codon for codon in variant_codon_list if codon[other_frame-1] in allowed_other_nuc_variants]
# go through each variant base
for base, curr_var_dict in variant_info['variants'].items():
var_nuc_freq = curr_var_dict['var_nuc_freq']
var_nuc_prop = curr_var_dict['var_nuc_prop']
# get aa variants and consensus
aa_consensus_freq = -1
varcodon_to_count = {}
varbase_accounted_boolean = 0
for codon in variant_codon_list:
codon_count = codon_map_codon_results[codon]
aa = codon_table[codon]
# observed codon accounted for nuc consensus
if (codon[frame-1]) == nuc_consensus:
if codon_count > aa_consensus_freq:
aa_consensus_freq = codon_count
aa_consensus = aa
aa_consensus_codon = codon
continue
# observed codon accounted for var base
elif (codon[frame-1]) == base:
varcodon_to_count[codon] = (aa, codon_count)
varbase_accounted_boolean = 1
continue
if aa_consensus_freq < 0:
# the consensus codon could not be determined from the consensus nucleotide at this position
if varbase_accounted_boolean == 0:
# can't find the variant-base codon either
print_nuc_pos = alnrefnun_to_truerefnum[gene][nucpos] if alnrefnun_to_truerefnum != None else nucpos
variant_call_df.append({'sampid':sampid,
'gene':gene,
'nucpos':print_nuc_pos, 'nuc_var':base, 'nuc_prop':var_nuc_prop, 'nuc_freq':var_nuc_freq,
'nuc_consensus':nuc_consensus, 'nuc_consensus_freq':nuc_consensus_freq, 'nuc_coverage':nuc_depth,
'protein':protein,
'aapos':aapos, "HA_num_type":HA_num_type, 'aa_var':None, 'aa_prop':None, 'aa_freq':None,
'expected_aa_consensus':None, 'aa_consensus':None, 'aa_consensus_freq':None, 'aa_coverage':None,
'codon_pos':frame, 'codon':None, 'nonsyn':None})
else:
# ... but variant codon accounted for
# - use expected nucleotide-inferred codon
for codon, (aa, codon_count) in varcodon_to_count.items():
# boolean to determine if aa is syn, nonsyn or stop codon
if aa == expected_aa_consensus:
nonsyn_bool = 0
elif aa == "*":
nonsyn_bool = -1 # stop codon
else:
nonsyn_bool = 1
print_nuc_pos = alnrefnun_to_truerefnum[gene][nucpos] if alnrefnun_to_truerefnum != None else nucpos
variant_call_df.append({'sampid':sampid,
'gene':gene,
'nucpos':print_nuc_pos, 'nuc_var':base, 'nuc_prop':var_nuc_prop, 'nuc_freq':var_nuc_freq,
'nuc_consensus':nuc_consensus, 'nuc_consensus_freq':nuc_consensus_freq, 'nuc_coverage':nuc_depth,
'protein':protein,
'aapos':aapos, "HA_num_type":HA_num_type, 'aa_var':aa, 'aa_prop':codon_count/aa_depth, 'aa_freq':codon_count,
'expected_aa_consensus':expected_aa_consensus, 'aa_consensus':None, 'aa_consensus_freq':None, 'aa_coverage':aa_depth,
'codon_pos':frame, 'codon':codon, 'nonsyn':nonsyn_bool})
continue
else:
# consensus codon accounted for
if varbase_accounted_boolean == 0:
# but can't find the variant-base codon - fall back to the consensus codon
hypothetical_varbase_codon = list(aa_consensus_codon)
hypothetical_varbase_codon[frame-1] = base
hypothetical_varbase_codon = "".join(hypothetical_varbase_codon)
codon_count = None
aa = codon_table[hypothetical_varbase_codon]
varcodon_to_count[hypothetical_varbase_codon] = (aa, codon_count)
for codon, (aa, codon_count) in varcodon_to_count.items():
# boolean to determine if aa is syn, nonsyn or stop codon
if aa == aa_consensus:
nonsyn_bool = 0
elif aa == "*":
nonsyn_bool = -1 # stop codon
else:
nonsyn_bool = 1
# note that nucpos is corrected to start from ATG
print_nuc_pos = alnrefnun_to_truerefnum[gene][nucpos] if alnrefnun_to_truerefnum != None else nucpos
variant_call_df.append({'sampid':sampid,
'gene':gene,
'nucpos':print_nuc_pos, 'nuc_var':base, 'nuc_prop':var_nuc_prop, 'nuc_freq':var_nuc_freq,
'nuc_consensus':nuc_consensus, 'nuc_consensus_freq':nuc_consensus_freq, 'nuc_coverage':nuc_depth,
'protein':protein,
'aapos':aapos, "HA_num_type":HA_num_type, 'aa_var':aa, 'aa_prop':None if codon_count == None else codon_count/aa_depth, 'aa_freq':codon_count,
'expected_aa_consensus':expected_aa_consensus, 'aa_consensus':aa_consensus, 'aa_consensus_freq':aa_consensus_freq, 'aa_coverage':aa_depth,
'codon_pos':frame, 'codon':codon, 'nonsyn':nonsyn_bool})
return
def variant_calling(sorted_sampid, sorted_refnames, base_qual_threshold, min_cov, min_var_prop, gene_to_proteinorf, err_tol, alnrefnun_to_truerefnum=None, ha_numbering_conversion=None, HAnum_subtype=None, min_var_freq=0, threadnum=4, reanalyze_bool=0):
"""
Call minority nucleotide and amino-acid variants for every sample in parallel, applying the coverage, proportion and error-tolerance filters, and cache the consolidated table under ./results/.
"""
print ("\nTallying minority variants with minimum coverage of %i at >%.1f%% with base calling error tolerance at %.1f%%..."%(min_cov, 100*min_var_prop, 100*err_tol))
# parameters
codon_table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',
}
all_bases = ['A', 'T', 'G', 'C']
varcall_fname = './results/variant_call_MinCoV{}_MinProp{}_MinFreq{}_ErrTol{}.csv'.format(min_cov, min_var_prop, min_var_freq, err_tol)
if os.path.isfile(varcall_fname) and reanalyze_bool == 0:
variant_call_df = pd.read_csv(varcall_fname)
variant_call_df['protein'] = variant_call_df['protein'].fillna('NA')
variant_call_df = variant_call_df.set_index(['sampid', 'gene', 'nucpos']).sort_index()
else:
variant_call_df = Manager().list()
pool = mp.Pool(processes=threadnum)
results = [pool.apply_async(variant_calling_worker, args=(sampid, codon_table, all_bases, variant_call_df, sorted_refnames, base_qual_threshold, min_cov, min_var_prop, gene_to_proteinorf, err_tol, alnrefnun_to_truerefnum, ha_numbering_conversion, HAnum_subtype, min_var_freq,)) for sampid in sorted_sampid]
output = [p.get() for p in results]
# change it back to python list type
variant_call_df = list(variant_call_df)
variant_call_df = pd.DataFrame.from_dict(variant_call_df)
variant_call_df = variant_call_df.set_index(['sampid', 'gene', 'nucpos']).sort_index()
variant_call_df.to_csv(varcall_fname)
return variant_call_df
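# Illustrative call (argument values are assumptions, not taken from the original
# analysis scripts; gene_to_proteinorf is the MultiIndexed gene/nucleotide-position
# to protein coordinate table built elsewhere in the pipeline):
#
#     variants = variant_calling(sorted_sampid, sorted_refnames,
#                                base_qual_threshold=30, min_cov=100,
#                                min_var_prop=0.02, gene_to_proteinorf=g2p,
#                                err_tol=0.05, threadnum=8)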
def sort_reads_worker(sampid, barcode_amp_stats, gene_to_proteinorf, gene_amplicon_to_unique_actual_pos, alnrefnun_to_truerefnum, barcode_coordinates, min_barcode_filter):
more_than_one_possible_amp_count = 0
# get all mapped reads for sampid
mapped_reads_fname = './results/mapped/mapped_%s.csv'%(sampid)
try:
mapped_reads_df = pd.read_csv(mapped_reads_fname)
except:
print ('No mapped reads found for {}'.format(sampid))
return
# skip if already checked for amplicon_nr
headers_to_check = ['amplicon_nr']
if barcode_coordinates != None:
headers_to_check.append("barcode")
if len(set(list(mapped_reads_df))&set(headers_to_check)) == len(headers_to_check):
#print ("HAHA")
return
# write new mapped reads with amplicon_nr and barcode info (if any)
new_mapped_reads = []
for r, row in mapped_reads_df.iterrows():
new_row = row.to_dict().copy()
gene = row["gene"]
start_pos = row["start_nucaln_pos"]
end_pos = row["end_nucaln_pos_plus1"]
read_nuc_seq = row["seq"]
read_pos_range = range(start_pos, end_pos)
# convert refnum pos range to true nuc pos if needed
if alnrefnun_to_truerefnum == None:
true_pos_range = read_pos_range
else:
true_pos_range = [alnrefnun_to_truerefnum[gene][p] for p in read_pos_range if p in alnrefnun_to_truerefnum[gene]]
# get amplicon_nr of read
found_amp_nr = None
cand_amp_nr = {amplicon_nr:len(set(true_pos_range)&set(gene_amplicon_to_unique_actual_pos[gene][amplicon_nr])) for amplicon_nr in gene_amplicon_to_unique_actual_pos[gene].keys() if len(set(true_pos_range)&set(gene_amplicon_to_unique_actual_pos[gene][amplicon_nr])) > 0}
if len(cand_amp_nr) == 1:
found_amp_nr = max(cand_amp_nr, key=cand_amp_nr.get)
# more than one possible candidate amplicon when searched by sites unique to each amplicon - does not make sense, so skip
elif len(cand_amp_nr) > 1:
more_than_one_possible_amp_count += 1
continue
# find barcodes of mapped reads
if barcode_coordinates != None:
barcode = None
gene_barcode_coordinates = barcode_coordinates[gene]
for idx, bc_range in enumerate(gene_barcode_coordinates):
bc_range = range(bc_range[0], bc_range[1])
if set(bc_range) <= set(read_pos_range):
barcode = "".join([read_nuc_seq[idx] for idx, read_pos in enumerate(read_pos_range) if read_pos in bc_range])
if re.search("^\-+$", barcode): # overlapping amplicon
continue
else:
# an ambiguous amplicon number can also be resolved by barcode order if each amplicon has only one barcode
if found_amp_nr == None:
if len(gene_barcode_coordinates) == len(gene_amplicon_to_unique_actual_pos[gene].keys()):
found_amp_nr = idx+1
else:
# barcode order does not tie in with the amplicon_nr derived from unique sites - again, skip.
if idx+1 != found_amp_nr:
continue
break
# barcode with poor quality bases (gaps) = no barcode
if barcode != None and re.search("-", barcode):
barcode = None
# discard reads with no barcode
if barcode == None:
continue
# save barcode
new_row['barcode'] = barcode
# save amplicon_nr
new_row['amplicon_nr'] = found_amp_nr
# save new read
new_mapped_reads.append(new_row)
new_mapped_reads = pd.DataFrame.from_dict(new_mapped_reads)
# filter reads where barcode < min_barcode_filter
if min_barcode_filter != None:
if not os.path.isdir("./results/barcode_sort"):
os.mkdir("./results/barcode_sort")
# find all barcodes to discard
new_mapped_reads_with_truenucpos = [] # new mapped_reads_df for sampid with cdr sequences and true nuc pos positions
index_to_remove = []
new_mapped_reads = new_mapped_reads.set_index(["amplicon_nr", "barcode"]).sort_index()
for amplicon_nr in sorted(set(new_mapped_reads.index.get_level_values(0))):
barcode_to_discard = []
amp_new_mapped_reads = new_mapped_reads.loc[amplicon_nr]
for barcode in set(amp_new_mapped_reads.index):
barcode_new_mapped_reads = amp_new_mapped_reads.loc[barcode]
# only one read per barcode (and if min_barcode_filter > 1)
if isinstance(barcode_new_mapped_reads, pd.Series) and min_barcode_filter > 1:
barcode_to_discard.append(barcode)
# number of reads with barcode < min_barcode_filter
elif len(barcode_new_mapped_reads) < min_barcode_filter:
barcode_to_discard.append(barcode)
# write result to files
# barcode statistics
barcode_amp_stats.append({"sampid":sampid, "gene":gene, "amplicon_nr":amplicon_nr,
"total_reads":len(amp_new_mapped_reads), # total number of reads for each amplicon
"num_of_reads_with_barcode_abv_thres":len(amp_new_mapped_reads[~amp_new_mapped_reads.index.isin(barcode_to_discard)]), # number of reads with unique barcode satisfying min_barcode_filter
"num_of_unique_barcodes":len(set(amp_new_mapped_reads.index)), # total number of uniquie barcodes
"num_of_unique_barcoes_abv_thres":len(set(amp_new_mapped_reads.index)-set(barcode_to_discard))}) # number of unique barcodes above min_barcode_filter
# write to file all barcodes
with open("./results/barcode_sort/%s_%i_allbar.txt"%(sampid, amplicon_nr), "w") as output:
output.write("\n".join(list(amp_new_mapped_reads.index)))
# all barcodes satisfying thres
with open("./results/barcode_sort/%s_%i_allminbar.txt"%(sampid, amplicon_nr), "w") as output:
output.write("\n".join(list([barcode for barcode in amp_new_mapped_reads.index if barcode not in barcode_to_discard])))
# all reads with barcodes abv thres
with open("./results/barcode_sort/%s_%i_allminread.txt"%(sampid, amplicon_nr), "w") as output:
# discard reads with < min bcc filter
amp_new_mapped_reads = amp_new_mapped_reads[~amp_new_mapped_reads.index.isin(barcode_to_discard)]
amp_new_mapped_reads_with_truenucpos = []
for barcode, row in amp_new_mapped_reads.iterrows():
new_row = row.to_dict().copy()
new_row["amplicon_nr"] = amplicon_nr
new_row["barcode"] = barcode
gene = row["gene"]
start_pos = row["start_nucaln_pos"]
end_pos = row["end_nucaln_pos_plus1"]
read_nuc_seq = row["seq"]
read_pos_range = range(start_pos, end_pos)
# write sequence within coding region only
cdr_range = list(gene_to_proteinorf.loc[gene].index.get_level_values(0))
cdr_seq = []
true_nuc_pos = []
for idx, ref_pos in enumerate(read_pos_range):
if ref_pos in cdr_range:
cdr_seq.append(read_nuc_seq[idx])
true_nuc_pos.append(alnrefnun_to_truerefnum[gene][ref_pos])
if len(cdr_seq) == 0: # read does not cover any cdr region
continue
cdr_seq = "".join(cdr_seq)
output.write("%i %s\n"%(amplicon_nr, cdr_seq))
new_row["seq"] = cdr_seq
new_row["start_nucaln_pos"] = min(true_nuc_pos)
new_row["end_nucaln_pos_plus1"] = max(true_nuc_pos)+1
amp_new_mapped_reads_with_truenucpos.append(new_row)
amp_new_mapped_reads_with_truenucpos = pd.DataFrame.from_dict(amp_new_mapped_reads_with_truenucpos)
new_mapped_reads_with_truenucpos.append(amp_new_mapped_reads_with_truenucpos)
# add amplicon, barcode sets to discard
index_to_remove += [(amplicon_nr, barcode) for barcode in barcode_to_discard]
new_mapped_reads_with_truenucpos = pd.concat(new_mapped_reads_with_truenucpos, ignore_index=True)
new_mapped_reads_with_truenucpos = new_mapped_reads_with_truenucpos.set_index(['gene', 'amplicon_nr', 'barcode']).sort_index()
new_mapped_reads_with_truenucpos.to_csv("./results/barcode_sort/mapped_%s_true_nucpos.csv"%(sampid))
# discard reads with < min bcc filter
new_mapped_reads = new_mapped_reads[~new_mapped_reads.index.isin(index_to_remove)]
new_mapped_reads = new_mapped_reads.reset_index()
# save new_mapped_reads
new_idx_names = ["gene", "amplicon_nr"]
if min_barcode_filter != None:
new_idx_names.append("barcode")
new_mapped_reads = new_mapped_reads.set_index(new_idx_names).sort_index()
new_mapped_reads.to_csv(mapped_reads_fname)
return
def sort_reads(sorted_sampid, primer_coords, gene_to_proteinorf, threadnum=4, alnrefnun_to_truerefnum=None, barcode_coordinates=None, min_barcode_filter=None):
print ("Sort reads by amplicons %s..."%("and barcodes" if barcode_coordinates!=None else ""))
primer_coords_df = pd.read_csv(primer_coords).set_index(["gene", "amplicon_nr"])
# assign amplicon by matching unique residues that can only be found each amplicon
gene_amplicon_to_unique_actual_pos = {}
# for each gene
for gene in set(primer_coords_df.index.get_level_values(0)):
gene_primer_coords_df = primer_coords_df.loc[gene]
# get list of amplicons
amp_list = sorted(set(gene_primer_coords_df.index))
# get all positions covered by amplicon
amp_to_full_range = {amplicon_nr:range(gene_primer_coords_df.loc[amplicon_nr, "coord_start"].min(), gene_primer_coords_df.loc[amplicon_nr, "coord_end"].max()) for amplicon_nr in amp_list}
# get unique positions covered by amplicon
for amplicon_nr in amp_list:
try:
gene_amplicon_to_unique_actual_pos[gene][amplicon_nr] = list(set(amp_to_full_range[amplicon_nr]) - set([k for v in [amp_to_full_range[amp2] for amp2 in amp_list if amp2 != amplicon_nr] for k in v]))
except:
gene_amplicon_to_unique_actual_pos[gene] = {amplicon_nr:list(set(amp_to_full_range[amplicon_nr]) - set([k for v in [amp_to_full_range[amp2] for amp2 in amp_list if amp2 != amplicon_nr] for k in v]))}
# to save barcode statistics
# define multiprocessing list
barcode_amp_stats = Manager().list()
# set up worker threads
pool = mp.Pool(processes=threadnum)
results = [pool.apply_async(sort_reads_worker, args=(sampid, barcode_amp_stats, gene_to_proteinorf, gene_amplicon_to_unique_actual_pos, alnrefnun_to_truerefnum, barcode_coordinates, min_barcode_filter,)) for sampid in sorted_sampid]
output = [p.get() for p in results]
# change it back to python list type
barcode_amp_stats = list(barcode_amp_stats)
pool.close()
print ("...done.")
if len(barcode_amp_stats) > 0:
barcode_amp_stats =
|
pd.DataFrame.from_dict(barcode_amp_stats)
|
pandas.DataFrame.from_dict
|
import os
import numpy
import subprocess
import glob
import logging
from pprint import pprint
import inspect
from pathlib import Path
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
MNLI_DATA_PATH = os.getenv("MNLI_PATH", "~/workspace/data/multinli_1.0")
FEVER_DATA_PATH = os.getenv("FEVER_PATH", "~/mygit/jiant/data/FEVER/")
PAWSQQP_DATA_PATH = os.getenv("PAWS_QQP_PATH", "~/mygit/jiant/data/soroush_data/extra/datasets/glue/paws/paws-qqp")
class settings(type):
def __new__(self, name, bases, classdict):
classdict['fields'] = dict(
(str(key), str(value)) for key, value in classdict.items()
if key not in ('__module__', '__qualname__'))
return type.__new__(self, name, bases, classdict)
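# Illustrative sketch (not part of the original script): the `settings`
# metaclass above snapshots every class attribute into a string-to-string
# `fields` dict that can be spliced straight into a command line.
# `_demo_defaults` is a hypothetical config class used only for this example.
def _settings_usage_example():
    class _demo_defaults(metaclass=settings):
        learning_rate = 5e-5
        model_type = 'bert'
    # -> {'learning_rate': '5e-05', 'model_type': 'bert'}
    return _demo_defaults.fields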
class bert_defaults(metaclass=settings):
per_gpu_eval_batch_size = 180
per_gpu_train_batch_size = 32
num_train_epochs = 10
decay_learning_rate = 'True'
do_lower_case = 'True'
learning_rate = 5e-5
model_name_or_path = 'bert-base-uncased'
model_type = 'bert'
class bert_large_defaults(metaclass=settings):
per_gpu_eval_batch_size = 180
per_gpu_train_batch_size = 16
num_train_epochs = 10
decay_learning_rate = 'True'
do_lower_case = 'True'
learning_rate = 5e-5
model_name_or_path = 'bert-large-uncased'
model_type = 'bert'
class xlnet_defaults(metaclass=settings):
    model_type = 'xlnet'
num_train_epochs = 10
model_name_or_path = "xlnet-base-cased"
learning_rate = 3e-5
    per_gpu_train_batch_size = 16
do_lower_case = "False"
class xlnet_large_defaults(metaclass=settings):
    model_type = 'xlnet'
num_train_epochs = 10
model_name_or_path = "xlnet-large-cased"
learning_rate = 1e-5
per_gpu_train_batch_size = 16
do_lower_case = "False"
class lstmatt_defaults(metaclass=settings):
model_type = 'baseline'
model_name_or_path = 'lstm-att'
learning_rate = 0.0005
num_train_epochs = 5
per_gpu_train_batch_size = 256
do_lower_case = 'False'
config_name = './config/lstmatt_small_config.json'
class bilstm_defaults(metaclass=settings):
model_type = 'baseline'
model_name_or_path = 'bilstm'
learning_rate = 0.0005
num_train_epochs = 5
per_gpu_train_batch_size = 256
do_lower_case = 'False'
config_name = './config/lstmatt_small_config.json'
class bow_defaults(metaclass=settings):
model_type = 'baseline'
model_name_or_path = 'bow'
learning_rate = 0.001
num_train_epochs = 5
per_gpu_train_batch_size = 256
do_lower_case = 'False'
config_name = './config/lstmatt_small_config.json'
class mnli_defaults(metaclass=settings):
data_dir = f'{MNLI_DATA_PATH}'
fp16 = ''
task_name = 'mnli'
do_train = ''
overwrite_output_dir = ''
per_gpu_eval_batch_size = 128
num_train_epochs = 4
class pawsqqp_defaults(metaclass=settings):
data_dir = f'{PAWSQQP_DATA_PATH}'
fp16 = ''
task_name = 'qqp'
do_train = ''
overwrite_output_dir = ''
eval_tasks = 'qqp-wang qqp-wang-test paws-qqp paws-wiki paws-qqp-all-val'
learning_rate = '5e-5'
num_train_epochs = 3
weight_decay = 0.0
per_gpu_train_batch_size = 32
per_gpu_eval_batch_size = 400
class fever_defaults(metaclass=settings):
data_dir = f'{FEVER_DATA_PATH}'
fp16 = ''
task_name = 'fever'
do_train = ''
overwrite_output_dir = ''
eval_tasks = 'fever fever-symmetric-r1 fever-symmetric-r2'
learning_rate = '2e-5'
num_train_epochs = 2
max_seq_length = 128
weight_decay = 0.0
per_gpu_train_batch_size = 32
per_gpu_eval_batch_size = 200
warmup_proportion = 0.
class fever_test_defaults(metaclass=settings):
data_dir = f'{FEVER_DATA_PATH}'
fp16 = ''
task_name = 'fever'
overwrite_output_dir = ''
eval_tasks = 'fever fever-symmetric-r1 fever-symmetric-r2'
per_gpu_eval_batch_size = 400
def execute(entry_point, kwargs):
pprint(kwargs)
args = ' '.join(f'--{str(k)} {str(v)}' for k, v in kwargs.items())
print(f"python {entry_point} {args}")
os.system(f"python {entry_point} {args}")
class Main():
def extract_subset_from_glove(self, glove_path, dictionary, output_dir):
"""Extracts a subset of vectors from the full glove dictionary
and stores them in output_dir/embeddings.pkl
"""
from models_weak import extract_subset_from_glove
extract_subset_from_glove(glove_path, dictionary, output_dir)
def extract_hard_examples(
self,
example_stats_path,
labels_file=None,
train_path=None,
task='mnli'
):
"""Given a model examples stats, filter all examples if unlearnt after epoch_num,
and store an example file in the specified directory.
"""
import pickle
import numpy as np
from pathlib import Path
import pandas as pd
output_path = example_stats_path + '/hard_examples.pkl'
examples_stats = pickle.load(open(example_stats_path + '/example_stats.pkl', 'rb'))
n_epochs = examples_stats['accuracy'].shape[1]
print("Loaded example stats,", examples_stats.keys())
if labels_file:
labels = open(labels_file, 'r').readlines()
labels_dict = dict()
for line in labels:
id, label = line.strip().split()
labels_dict[int(id)] = int(label)
if train_path:
if task == 'mnli':
df = pd.read_csv(
train_path,
sep='\t',
error_bad_lines=False,
skiprows=0,
quoting=3,
keep_default_na=False,
encoding="utf-8",)
elif task == 'fever':
import json
with open(train_path, 'r') as f:
data = [json.loads(s.strip()) for s in f.readlines()]
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import numpy as np
from typing import Callable, Tuple
from dataprocessing import F1Dataset
import pandas as pd
import GPy
import datetime
processed_laps = dict()
processed_pits = dict()
def get_or_load_data(year: int) -> Tuple[pd.DataFrame, pd.DataFrame]:
# return data if it was already processed
global processed_laps
global processed_pits
if year in processed_pits:
print("already processed")
return processed_laps[year], processed_pits[year]
data = F1Dataset('data')
races = data.races
years_races = races.loc[races['year'] == year][['raceId', 'circuitId']]
# load qualification data,obtain fastest quali time at each race for normalisation purposes
qualis = data.qualifying
qrs = qualis.merge(years_races, on='raceId')
qrs = qrs.loc[~
|
pd.isnull(qrs['q3'])
|
pandas.isnull
|
import train
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
le = preprocessing.LabelEncoder()
def main():
# test = os.environ.get("TEST_DATA")
# train_data = os.environ.get("TRAINING_DATA")
TRAINING_DATA_DIR = os.environ.get("TRAINING_DATA")
TEST_DATA = os.environ.get("TEST_DATA")
train_data =
|
pd.read_csv(TRAINING_DATA_DIR)
|
pandas.read_csv
|
"""
Define constant mappings between bit-mask values and understandable quality
flags
"""
from functools import wraps
import pandas as pd
import numpy as np
# The quality_flag field in MySQL is currently limited to 1 << 15;
# fields beyond 1 << 15 will require a change in the MySQL datatype
# for the quality_flag column. The mapping from description to bitmask
# is versioned so that future addtions or removals are backwards compatible
# without rerunning the validation on all data.
# DO NOT MODIFY THE VALUES OF THE _BITMASK_DESCRIPTION_DICT instead,
# add a increment the key and add a new value tuple, i.e. add version 2 like
# 2: {'OK': 0, 'USER FLAGGED: 1 << 0, ...} . The VERSION
# IDENTIFIER 0 - 2 must remain in their current positions. Versions 7
# and up will require another identifier bit to be determined at that
# time. The version identifier also serves to mark data in the
# database as validated. The tuples are (description, bit mask)
BITMASK_DESCRIPTION_DICT = {1: {
# start with 1 to distinguish validated vs not in DB
'OK': 0,
'USER FLAGGED': 1 << 0,
'VERSION IDENTIFIER 0': 1 << 1,
'VERSION IDENTIFIER 1': 1 << 2,
'VERSION IDENTIFIER 2': 1 << 3,
'NIGHTTIME': 1 << 4,
'CLEARSKY': 1 << 5,
'SHADED': 1 << 6,
'UNEVEN FREQUENCY': 1 << 7,
'LIMITS EXCEEDED': 1 << 8,
'CLEARSKY EXCEEDED': 1 << 9,
'STALE VALUES': 1 << 10,
'INTERPOLATED VALUES': 1 << 11,
'CLIPPED VALUES': 1 << 12,
'INCONSISTENT IRRADIANCE COMPONENTS': 1 << 13,
'DAILY VALIDATION APPLIED': 1 << 14,
'RESERVED 1': 1 << 15 # available for new flag
}
}
# logical combinations of the masks defined above.
# add a version layer for compatibility if needed in the future.
# derived masks may reference masks defined in an earlier key.
DERIVED_MASKS = {
'DAYTIME': (np.logical_not, 'NIGHTTIME'),
'DAYTIME STALE VALUES': (np.logical_and, 'DAYTIME', 'STALE VALUES'),
'DAYTIME INTERPOLATED VALUES': (
np.logical_and, 'DAYTIME', 'INTERPOLATED VALUES'),
}
# should never change unless another VERSION IDENTIFIER is required
VERSION_MASK = 0b1110
LATEST_VERSION = max(BITMASK_DESCRIPTION_DICT.keys())
DESCRIPTION_MASK_MAPPING = BITMASK_DESCRIPTION_DICT[LATEST_VERSION]
LATEST_VERSION_FLAG = (
LATEST_VERSION * DESCRIPTION_MASK_MAPPING['VERSION IDENTIFIER 0'])
DAILY_VALIDATION_FLAG = DESCRIPTION_MASK_MAPPING['DAILY VALIDATION APPLIED']
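# Illustrative sketch (not part of the original module): a stored quality flag
# is the bitwise OR of the individual masks plus the version identifier bits.
def _flag_composition_example():
    flag = (LATEST_VERSION_FLAG
            | DESCRIPTION_MASK_MAPPING['NIGHTTIME']
            | DESCRIPTION_MASK_MAPPING['STALE VALUES'])
    # get_version(flag) == LATEST_VERSION and which_data_is_ok(flag) is False
    return flag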
def convert_bool_flags_to_flag_mask(flags, flag_description, invert):
if flags is None:
return None
if invert:
bool_flags = ~(flags.astype(bool))
else:
bool_flags = flags.astype(bool)
return ((bool_flags * DESCRIPTION_MASK_MAPPING[flag_description])
| LATEST_VERSION_FLAG)
def mask_flags(flag_description, invert=True):
"""
Decorator that will convert a boolean pandas object into an integer,
bitmasked object when `_return_mask=True`. This decorator adds the
`_return_mask` kwarg to the decorated function. Using this decorator
to mask values ensures the description and decorated function are
clearly linked.
Parameters
----------
flag_description : str
Description of the flag to convert from a boolean to integer. Must be
a key of the DESCRIPTION_MASK_MAPPING dict.
invert : boolean
Whether to invert the boolean object before conversion e.g. if
flag_description = 'LIMITS EXCEEDED' and a True value indicates
that a parameter is within the limits, invert=True is required
for the proper mapping.
Returns
-------
flags : pandas Object
Returns the output of the decorated function (which must be a pandas
Object) as the original output or an object of type int with value
        determined by the truthiness of the original output and flag_description
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
return_mask = kwargs.pop('_return_mask', False)
flags = f(*args, **kwargs)
if return_mask:
if isinstance(flags, tuple):
return tuple(convert_bool_flags_to_flag_mask(
f, flag_description, invert) for f in flags)
else:
return convert_bool_flags_to_flag_mask(
flags, flag_description, invert)
else:
return flags
return wrapper
return decorator
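# Illustrative sketch (not part of the original module): a hypothetical
# validation check wrapped by the decorator. With invert=True, observations
# failing the check (False) contribute the 'LIMITS EXCEEDED' bit when the
# wrapped function is called with _return_mask=True.
@mask_flags('LIMITS EXCEEDED', invert=True)
def _within_limits_example(values, lower, upper):
    # boolean Series: True where the observation is inside the limits
    return (values >= lower) & (values <= upper)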
def has_data_been_validated(flags):
"""Return True (or a boolean series) if flags has been validated"""
return flags > 1
def get_version(flag):
"""Extract the version from flag"""
# will be more complicated if another version identifier must be added
return np.right_shift(flag & VERSION_MASK, 1)
def _flag_description_checks(flag_description):
if isinstance(flag_description, str):
return
else:
if len(flag_description) == 0:
raise TypeError('flag_description must have len > 0')
for k in iter(flag_description):
if not isinstance(k, str):
raise TypeError(
'Elements of flag_description must have type str')
def check_if_single_value_flagged(flag, flag_description,
_perform_checks=True):
"""Check if the single integer flag has been flagged for flag_description
Parameters
----------
flag : integer
Integer flag
flag_description : string or iterable of strings
        Checks to compare against the flag
Returns
-------
Boolean
Whether any of `flag_description` checks are represented by `flag`
Raises
------
ValueError
If flag has not been validated
TypeError
If flag_description is not a string or iterable of strings
KeyError
If flag_description is not a possible check for the flag version
"""
if _perform_checks:
if not has_data_been_validated(flag):
raise ValueError('Data has not been validated')
_flag_description_checks(flag_description)
mask_dict = BITMASK_DESCRIPTION_DICT[get_version(flag)]
if isinstance(flag_description, str):
mask = mask_dict[flag_description]
ok_mask = mask == 0
else:
mask = 0
ok_mask = False
for k in flag_description:
m = mask_dict[k]
if m == 0:
ok_mask = True
mask |= m
out = bool(flag & mask)
if ok_mask:
out |= which_data_is_ok(flag)
return out
def which_data_is_ok(flags):
"""Return True for flags that have been validated and are OK"""
return (flags & ~VERSION_MASK == 0) & has_data_been_validated(flags)
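# Illustrative sketch (not part of the original module): applying the helpers
# to a small Series of stored flags; the flag values below are made up.
def _flag_helpers_example():
    flags = pd.Series([2, 2 | (1 << 4), 2 | (1 << 8)])
    validated = has_data_been_validated(flags)  # True, True, True
    ok = which_data_is_ok(flags)                # True, False, False
    return validated, ok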
def _make_mask_series(version):
descriptions = [k for k in BITMASK_DESCRIPTION_DICT[version].keys()
if not (k.startswith('VERSION') or
k.startswith('RESERVED') or k == 'OK')]
masks = [BITMASK_DESCRIPTION_DICT[version][desc]
for desc in descriptions]
return
|
pd.Series(masks, index=descriptions)
|
pandas.Series
|
import pandas as pd
import numpy as np
from functools import partial
from os import path
from datamanager.envs import MASTER_DATA_PATH
'''
This module contains equity indicator and transformation functions for time series data based on pandas DataFrame's
'''
def calc_booktomarket(close, bookvalue):
    '''
    Calculate the book-to-market ratio from close prices and book values
    '''
    # forward fill - the book value published at a given date stays valid until the next book value is available
bookval = bookvalue.ffill()
b2m = bookval / close
return b2m
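# Illustrative sketch (not part of the original module): the inputs are close
# price and book-value DataFrames on the same (date x ticker) grid; the sparse
# book values are forward-filled inside calc_booktomarket. Data is made up.
def _booktomarket_example():
    idx = pd.date_range('2020-01-01', periods=4, freq='D')
    close = pd.DataFrame({'ABC': [10.0, 11.0, 12.0, 13.0]}, index=idx)
    bookvalue = pd.DataFrame({'ABC': [5.0, np.nan, np.nan, np.nan]}, index=idx)
    return calc_booktomarket(close, bookvalue)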
def detrended_oscillator(close):
'''
Calculate the detrended oscillator
'''
ma20 = partial(moving_avg, days=20)
ma50 = partial(moving_avg, days=50)
max20 = partial(max, period = 20)
ixlr = index_log_returns(close)
assert isinstance(ixlr.index, pd.DatetimeIndex)
sma = ma20(ixlr)
lma = ma50(ixlr)
maximum = max20(ixlr)
do = (sma - lma) / maximum
return do
def resample_monthly(data, how = 'last'):
'''
Resample the data to a monthly frequency using a specific aggregation function
'''
if(how == 'sum'):
return data.resample('M').sum()
elif(how == 'mean'):
return data.resample('M').mean()
return data.resample('M').last()
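# Illustrative sketch (not part of the original module): resampling made-up
# daily values to month-end observations with the default 'last' aggregation.
def _resample_monthly_example():
    idx = pd.date_range('2020-01-01', '2020-02-29', freq='D')
    daily = pd.DataFrame({'ABC': np.arange(len(idx), dtype=float)}, index=idx)
    return resample_monthly(daily, how='last')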
def moving_avg(data, days, min_days = None):
'''
Calculate the moving average of the daily data
Parameters
---------
data : pandas.DataFrame
days : int
min_days : int
Returns
-----------
'''
return
|
pd.rolling_mean(data, days, min_periods=min_days, freq='D')
|
pandas.rolling_mean
|
import pandas as pd
import numpy as np
import pytest
from .count_level_crossings import main
def test_all_edges():
pd.testing.assert_series_equal(
main(
data=pd.Series(
[10.0, 22.0, 18.0, 2.0, 12.0, 10.0, 18.0, 2.0],
index=pd.to_datetime(
[
"2019-08-01 15:20:10",
"2019-08-01 15:20:11",
"2019-08-01 15:20:14",
"2019-08-01 15:20:16",
"2019-08-01 15:20:18",
"2019-08-01 15:20:20",
"2019-08-01 15:20:21",
"2019-08-01 15:20:24",
]
),
),
level=10,
hysteresis=1,
edge_type=0,
)["result"],
pd.Series(
[0, 0, 0, 1, 2, 2, 2, 3],
index=pd.to_datetime(
[
"2019-08-01 15:20:10",
"2019-08-01 15:20:11",
"2019-08-01 15:20:14",
"2019-08-01 15:20:16",
"2019-08-01 15:20:18",
"2019-08-01 15:20:20",
"2019-08-01 15:20:21",
"2019-08-01 15:20:24",
]
),
),
check_dtype=False,
)
def test_falling_edges():
pd.testing.assert_series_equal(
main(
data=pd.Series(
[10.0, 22.0, 18.0, 2.0, 12.0, 10.0, 18.0, 2.0],
index=pd.to_datetime(
[
"2019-08-01 15:20:10",
"2019-08-01 15:20:11",
"2019-08-01 15:20:14",
"2019-08-01 15:20:16",
"2019-08-01 15:20:18",
"2019-08-01 15:20:20",
"2019-08-01 15:20:21",
"2019-08-01 15:20:24",
]
),
),
level=10,
hysteresis=1,
edge_type=-1,
)["result"],
pd.Series(
[0, 0, 0, 1, 1, 1, 1, 2],
index=pd.to_datetime(
[
"2019-08-01T15:20:10",
"2019-08-01T15:20:11",
"2019-08-01T15:20:14",
"2019-08-01T15:20:16",
"2019-08-01T15:20:18",
"2019-08-01T15:20:20",
"2019-08-01T15:20:21",
"2019-08-01T15:20:24",
]
),
),
check_dtype=False,
)
def test_rising_edges():
pd.testing.assert_series_equal(
main(
data=pd.Series(
[10.0, 22.0, 18.0, 2.0, 12.0, 10.0, 18.0, 2.0],
index=pd.to_datetime(
[
"2019-08-01 15:20:10",
"2019-08-01 15:20:11",
"2019-08-01 15:20:14",
"2019-08-01 15:20:16",
"2019-08-01 15:20:18",
"2019-08-01 15:20:20",
"2019-08-01 15:20:21",
"2019-08-01 15:20:24",
]
),
),
level=10,
hysteresis=1,
edge_type=1,
)["result"],
pd.Series(
[0, 0, 0, 0, 1, 1, 1, 1],
index=pd.to_datetime(
[
"2019-08-01T15:20:10",
"2019-08-01T15:20:11",
"2019-08-01T15:20:14",
"2019-08-01T15:20:16",
"2019-08-01T15:20:18",
"2019-08-01T15:20:20",
"2019-08-01T15:20:21",
"2019-08-01T15:20:24",
]
),
),
check_dtype=False,
)
def test_none():
pd.testing.assert_series_equal(
main(
data=pd.Series(
[10.0, 22.0, 18.0, None, 12.0, 10.0, np.nan, 2.0],
index=pd.to_datetime(
[
"2019-08-01 15:20:10",
"2019-08-01 15:20:11",
"2019-08-01 15:20:14",
"2019-08-01 15:20:16",
"2019-08-01 15:20:18",
"2019-08-01 15:20:20",
"2019-08-01 15:20:21",
"2019-08-01 15:20:24",
]
),
),
level=10,
hysteresis=1,
edge_type=0,
)["result"],
pd.Series(
[0, 0, 0, 0, 0, 0, 0, 1],
index=pd.to_datetime(
[
"2019-08-01T15:20:10",
"2019-08-01T15:20:11",
"2019-08-01T15:20:14",
"2019-08-01T15:20:16",
"2019-08-01T15:20:18",
"2019-08-01T15:20:20",
"2019-08-01T15:20:21",
"2019-08-01T15:20:24",
]
),
),
check_dtype=False,
)
def test_date_unsorted():
with pytest.raises(ValueError, match="data must be sorted by its index"):
main(
data=pd.Series(
{
"2019-08-01T15:20:20": 10.0,
"2019-08-01T15:20:10": 10.0,
"2019-08-01T15:20:11": 22.0,
"2019-08-01T15:20:14": 18.0,
"2019-08-01T15:20:21": 18.0,
"2019-08-01T15:20:24": 2.0,
"2019-08-01T15:20:16": 2.0,
"2019-08-01T15:20:18": 12.0,
}
),
level=10,
hysteresis=1,
edge_type=0,
)
def test_numeric_index():
pd.testing.assert_series_equal(
main(
data=pd.Series({0: 1.0, 1: 4.0, 3: 4.0, 4: 7.0}),
level=5,
hysteresis=2,
edge_type=0,
)["result"],
pd.Series({0: 0, 1: 0, 3: 0, 4: 1}),
check_dtype=False,
)
def test_numeric_unsorted():
with pytest.raises(ValueError, match="data must be sorted by its index"):
main(
data=pd.Series({3: 4.0, 0: 1.0, 1: 4.0, 4: 7.0}),
level=5,
hysteresis=2,
edge_type=0,
)["result"],
pd.Series({0: 0, 1: 0, 3: 0, 4: 1})
def test_negative_hysteresis():
with pytest.raises(ValueError, match="hysteresis must be non-negative, it is -5"):
main(
data=pd.Series({0: 1.0, 1: 4.0, 3: 4.0, 4: 7.0}),
level=7,
hysteresis=-5,
edge_type=-1,
)
def test_series_empty():
with pytest.raises(
ValueError, match="length of data must be greater than 1, it is 0"
):
main(data=
|
pd.Series(dtype=float)
|
pandas.Series
|
import os
import re
import sys
from io import StringIO
import numpy as np
import pandas as pd
from Bio import AlignIO, SeqIO
from Bio.Align.Applications import MafftCommandline, MuscleCommandline
from Bio.Phylo.PAML import yn00
import wgdi.base as base
class ks():
def __init__(self, options):
base_conf = base.config()
self.pair_pep_file = 'pair.pep'
self.pair_cds_file = 'pair.cds'
self.prot_align_file = 'prot.aln'
self.mrtrans = 'pair.mrtrans'
self.pair_yn = 'pair.yn'
self.cds_file = 'cds'
self.pep_file = 'pep'
for k, v in base_conf:
setattr(self, str(k), v)
for k, v in options:
setattr(self, str(k), v)
print(str(k), ' = ', v)
def auto_file(self):
pairs = []
p =
|
pd.read_csv(self.pairs_file, sep='\n', header=None, nrows=30)
|
pandas.read_csv
|
# Copyright (C) 2016 <NAME> <<EMAIL>>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from . import Utils as tsutil
def testTransform_one_seed(tr1 , seed_value):
df = pd.DataFrame();
np.random.seed(seed_value)
df['A'] = np.random.normal(0, 1.0, 10);
# df['A'] = range(1, 6000);
sig = df['A'];
tr1.mOriginalSignal = "selfTestSignal";
tr1.fit(sig)
sig1 = tr1.apply(sig);
sig2 = tr1.invert(sig1)
# print(sig)
# print(sig1)
# print(sig2)
n = np.linalg.norm(sig2 - sig)
lEps = 1e-7
if(n > lEps):
print("'" + tr1.get_name("Test") + "'" , " : ", n)
print(sig.values)
print(sig1.values)
print(sig2.values)
assert(n <= lEps)
def testTransform(tr1):
for seed_value in range(0,10,100):
testTransform_one_seed(tr1, seed_value)
class cAbstractSignalTransform:
def __init__(self):
self.mOriginalSignal = None;
self.mComplexity = None;
self.mScaling = None;
self.mDebug = False;
pass
def is_applicable(self, sig):
return True;
def checkSignalType(self, sig):
# print(df.info());
type2 = sig.dtype
if(type2.kind == 'O'):
            raise tsutil.PyAF_Error('Invalid Signal Column Type ' + str(sig.dtype));
def fit_scaling_params(self, sig):
if(self.mScaling is not None):
# self.mMeanValue = np.mean(sig);
# self.mStdValue = np.std(sig);
# lEps = 1.0e-10
self.mMinInputValue = np.min(sig);
self.mMaxInputValue = np.max(sig);
self.mInputValueRange = self.mMaxInputValue - self.mMinInputValue;
else:
pass
def scale_value(self, x):
if(np.fabs(self.mInputValueRange) < 1e-10):
return 0.0
return (x - self.mMinInputValue) / self.mInputValueRange;
def scale_signal(self, sig):
if(self.mScaling is not None):
# print("SCALE_START", sig.values[1:5]);
sig1 = sig.apply(self.scale_value);
# print("SCALE_END", sig1.values[1:5]);
return sig1;
else:
return sig;
def rescale_value(self, x):
y = self.mMinInputValue + x * self.mInputValueRange;
return y
def rescale_signal(self, sig1):
if(self.mScaling is not None):
# print("RESCALE_START", sig1.values[1:5]);
sig = sig1.apply(self.rescale_value);
# print("RESCALE_END", sig.values[1:5]);
return sig;
else:
return sig1;
def fit(self , sig):
# print("FIT_START", self.mOriginalSignal, sig.values[1:5]);
self.checkSignalType(sig)
self.fit_scaling_params(sig);
sig1 = self.scale_signal(sig);
self.specific_fit(sig1);
# print("FIT_END", self.mOriginalSignal, sig1.values[1:5]);
pass
def apply(self, sig):
# print("APPLY_START", self.mOriginalSignal, sig.values[1:5]);
self.checkSignalType(sig)
sig1 = self.scale_signal(sig);
sig2 = self.specific_apply(sig1);
# print("APPLY_END", self.mOriginalSignal, sig2.values[1:5]);
if(self.mDebug):
self.check_not_nan(sig2 , "transform_apply");
return sig2;
def invert(self, sig1):
# print("INVERT_START", self.mOriginalSignal, sig1.values[1:5]);
sig2 = self.specific_invert(sig1);
rescaled_sig = self.rescale_signal(sig2);
# print("INVERT_END", self.mOriginalSignal, rescaled_sig.values[1:5]);
return rescaled_sig;
def transformDataset(self, df, isig):
df[self.get_name(isig)] = self.apply(df[isig])
return df;
def test(self):
# import copy;
# tr1 = copy.deepcopy(self);
# testTransform(tr1);
pass
def dump_apply_invert(self, df_before_apply, df_after_apply):
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import keras
import keras.backend as backend
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.callbacks import CSVLogger, History
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize, LabelEncoder, label_binarize
"""
Created by <NAME> on 8/1/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
n_epochs = 300
batch_size = 32
def create_regressor(n_features, layers, n_outputs, optimizer=None):
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
model = Model(inputs=input_layer, outputs=dense)
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(optimizer=optimizer, loss=["mse"], metrics=["mae"])
return model
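# Illustrative sketch (not part of the original script): building a small
# regressor on made-up dimensions; the layer sizes here are arbitrary.
def _create_regressor_example():
    model = create_regressor(n_features=100, layers=[64, 16], n_outputs=1)
    return model.count_params()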
def random_classifier(drug_name=None, prediction_class=None):
accuracies = {}
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv") and not (
compound.__contains__("PLX4720") or compound.__contains__("Panobinostat")):
name = compound.split(".")[0]
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
n_samples = x_data.shape[0]
if prediction_class is None:
y_pred = np.random.random_integers(low=0, high=1, size=(n_samples, 1))
else:
if prediction_class == 1:
y_pred = np.ones(shape=[n_samples, 1])
else:
y_pred = np.zeros(shape=[n_samples, 1])
accuracies[name] = accuracy_score(y_data, y_pred)
print("%s's Accuracy\t:\t%.4f%%" % (compound.split(".")[0], 100 * accuracy_score(y_data, y_pred)))
log_path = "../Results/Classification/ML/"
log_name = "Random" + "-" + str(prediction_class) + ".csv" if prediction_class is not None else "Random.csv"
accuracies = pd.DataFrame(accuracies, index=[0])
accuracies.to_csv(log_path + log_name)
def create_SAE(n_features=50000, n_code=12):
input_layer = Input(shape=(n_features,))
dense = Dense(2048, activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.2)(dense)
dense = Dense(1024, activation='relu', name="dense_1")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(256, activation='relu', name="dense_2")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(64, activation='relu', name="dense_3")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
encoded = Dense(n_code, activation='relu', name="encoded")(dense)
dense = Dense(512, activation="relu", name="dense_4")(encoded)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
decoded = Dense(n_features, activation='sigmoid', name="decoded")(dense)
cl_output = Dense(2, activation="softmax", name="classifier")(encoded)
model = Model(inputs=input_layer, outputs=[decoded, cl_output])
model.summary()
lambda_value = 9.5581e-3
def contractive_loss(y_pred, y_true):
mse = backend.mean(backend.square(y_true - y_pred), axis=1)
        w = backend.variable(value=model.get_layer('encoded').get_weights()[0])  # N x N_hidden
        w = backend.transpose(w)  # N_hidden x N
        h = model.get_layer('encoded').output
        dh = h * (1 - h)  # N_batch x N_hidden
        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
contractive = lambda_value * backend.sum(dh ** 2 * backend.sum(w ** 2, axis=1), axis=1)
return mse + contractive
reconstructor_loss = contractive_loss
classifier_loss = "categorical_crossentropy"
optimizer = keras.optimizers.Nadam(lr=0.005, beta_1=0.95)
model.compile(optimizer=optimizer, loss=[reconstructor_loss, classifier_loss],
loss_weights=[0.005, 0.005],
metrics={"decoded": ["mae", "mse", "mape"], "classifier": "acc"})
return model
def create_classifier(n_features=51, layers=None, n_outputs=1):
if layers is None:
layers = [1024, 256, 64, 16, 4]
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
optimizer = keras.optimizers.adamax()
if n_outputs > 1:
dense = Dense(n_outputs, activation='softmax', name="output")(dense)
loss = keras.losses.categorical_crossentropy
else:
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
loss = keras.losses.binary_crossentropy
model = Model(inputs=input_layer, outputs=dense)
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
return model
def load_data(data_path="../Data/CCLE/drug_response.csv", feature_selection=False):
if data_path.__contains__("/FS/"):
data = pd.read_csv(data_path)
else:
data = pd.read_csv(data_path, index_col="Cell Line")
if data_path.__contains__("Regression"):
y_data = data['IC50 (uM)']
x_data = data.drop(['IC50 (uM)'], axis=1)
else:
y_data = data['class']
x_data = data.drop(['class'], axis=1)
label_encoder = LabelEncoder()
y_data = label_encoder.fit_transform(y_data)
y_data = np.reshape(y_data, (-1, 1))
y_data = keras.utils.to_categorical(y_data, 2)
if feature_selection and not data_path.__contains__("/FS/"):
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
x_data = data[feature_names]
return np.array(x_data), np.array(y_data)
def produce_classification_data(compounds):
for compound in compounds:
name = compound.split(".")[0]
print(compound, end="\t")
data = pd.read_csv("../Data/CCLE/Regression/" + name + "_preprocessed.csv")
data['class'] = np.nan
data.loc[data['IC50 (uM)'] >= 8, 'class'] = 1 # resistant
        data.loc[data['IC50 (uM)'] < 8, 'class'] = 0  # sensitive
data.dropna(how='any', axis=0, inplace=True)
data.drop(["IC50 (uM)"], axis=1, inplace=True)
data.to_csv("../Data/CCLE/Classification/" + name + ".csv", index_label="Cell Line")
print("Finished!")
def normalize_data(x_data, y_data=None):
x_data = pd.DataFrame(normalize(np.array(x_data), axis=0, norm='max')).values
if y_data is not None:
y_data = pd.DataFrame(np.reshape(np.array(y_data), (-1, 1)))
y_data = pd.DataFrame(normalize(np.array(y_data), axis=0, norm='max'))
return np.array(x_data), np.array(y_data)
return np.array(x_data)
def regressor(drug_name=None):
data_directory = '../Data/CCLE/Regression/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith("_preprocessed.csv"):
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized!")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.15, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
# for optimizer in optimizers:
model = create_regressor(x_train.shape[1], [1024, 256, 64, 4], 1, None)
logger_path = '../Results/Regression/' + compound.split(".")[0] + ".log"
csv_logger = CSVLogger(logger_path)
model.summary()
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
result = pd.read_csv(logger_path, delimiter=',')
plt.figure(figsize=(15, 10))
plt.plot(result['epoch'], result["loss"], label="Training Loss")
plt.plot(result['epoch'], result["val_loss"], label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("MSE Loss")
plt.xticks([i for i in range(0, n_epochs + 5, 5)])
plt.yticks(np.arange(0.25, -0.05, -0.05).tolist())
plt.title(compound.split(".")[0])
plt.grid()
plt.savefig("../Results/Regression/images/%s.png" % compound.split(".")[0])
plt.close("all")
model.save("../Results/Regression/%s.h5" % compound.split(".")[0])
def regressor_with_different_optimizers():
data_path = "../Data/CCLE/Regression/ZD-6474_preprocessed.csv"
optimizers = [
keras.optimizers.SGD(lr=0.1, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.Adagrad(lr=0.01, decay=1e-6),
keras.optimizers.Adadelta(lr=1.0, rho=0.95, decay=1e-6),
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.99, decay=1e-6),
keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
]
print("Loading Data...")
x_data, y_data = load_data(data_path, feature_selection=True)
print("Data has been Loaded.")
print("Normalizing Data...")
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized.")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
n_features = x_train.shape[1]
layers = [1024, 256, 64, 8]
n_outputs = 1
for idx, optimizer in enumerate(optimizers):
model = create_regressor(n_features, layers, n_outputs, optimizer)
logger_path = "../Results/Optimizers/"
optimizer_name = str(optimizer.__class__).split(".")[-1].split("\'")[0] + "_"
optimizer_name += '_'.join(
["%s_%.4f" % (key, value) for (key, value) in optimizer.get_config().items()])
optimizer_name += '.log'
csv_logger = CSVLogger(logger_path + optimizer_name)
model.summary()
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
def regressor_with_k_best_features(k=50):
data_directory = '../Data/CCLE/'
compounds = os.listdir(data_directory)
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
for compound in compounds:
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound)
print("Data has been Loaded!")
x_data = x_data[feature_names]
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized!")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
for k in [50, 40, 30, 20, 10, 5, 4, 3, 2, 1]:
model = create_regressor(x_train.shape[1], [32, 16, 4], 1)
dir_name = "../Results/Drugs/%s/%dFeaturesSelection" % (compound.split(".")[0], k)
os.makedirs(dir_name)
csv_logger = CSVLogger(dir_name + '/best_%s_%d.log' % (compound.split(".")[0], k))
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
import csv
with open("../Results/Drugs/%s/%s.csv" % (compound.split(".")[0], compound.split(".")[0]), 'a') as file:
writer = csv.writer(file)
loss = model.evaluate(x_test.as_matrix(), y_test.as_matrix(), verbose=0)
loss.insert(0, k)
writer.writerow(loss)
df = pd.read_csv("../Results/Drugs/%s/%s.csv" % (compound.split(".")[0], compound.split(".")[0]), header=None)
plt.figure()
plt.plot(df[0], df[1], "-o")
plt.xlabel("# of Features")
plt.ylabel("Mean Absolute Error")
plt.title(compound.split(".")[0])
plt.savefig("../Results/Drugs/%s/%s.png" % (compound.split(".")[0], compound.split(".")[0]))
def classifier(drug_name=None):
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv"):
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
# x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.05, shuffle=True)
# print("x_train shape\t:\t" + str(x_train.shape))
# print("y_train shape\t:\t" + str(y_train.shape))
# print("x_test shape\t:\t" + str(x_test.shape))
# print("y_test shape\t:\t" + str(y_test.shape))
logger_path = "../Results/Classification/CV/"
# plt.figure(figsize=(15, 10))
# plt.title(compound.split(".")[0])
model = None
for k in range(10, 15, 5):
model = KerasClassifier(build_fn=create_classifier,
epochs=500,
batch_size=64,
verbose=2,
)
# y_data = encode_labels(y_data, 2)
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.25)
model.fit(x_train, y_train, validation_data=(x_test, y_test))
print(x_test.shape)
print(y_test.shape)
y_pred = model.predict(x_test)
y_pred = np.reshape(y_pred, (-1, 1))
y_test = np.reshape(y_test, (-1, 1))
print("Accuracy: %.4f %%" % (accuracy_score(y_test, y_pred) * 100))
print(y_pred.shape)
print(y_test.shape)
n_classes = y_pred.shape[1]
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
plt.close("all")
plt.figure()
lw = 2
plt.plot(fpr[0], tpr[0], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for %s' % compound.split(".")[0])
plt.legend(loc="lower right")
# plt.show()
plt.savefig("../Results/Classification/ML/ROC/Deep Learning/%s.pdf" % (compound.split(".")[0]))
# log_name = "Stratified %s-%d-cv.csv" % (compound.split(".")[0], k)
# for x_train_cv, x_validation, y_train_cv, y_validation in stratified_kfold(x_data, y_data, k=k):
# label_encoder = LabelEncoder()
# y_train_cv = label_encoder.fit_transform(y_train_cv)
# y_train_cv = np.reshape(y_train_cv, (-1, 1))
# y_train_cv = keras.utils.to_categorical(y_train_cv, 2)
#
# y_validation = label_encoder.transform(y_validation)
# y_validation = np.reshape(y_validation, (-1, 1))
# y_validation = keras.utils.to_categorical(y_validation, 2)
# model.fit(x=x_train_cv,
# y=y_train_cv,
# batch_size=batch_size,
# epochs=n_epochs,
# validation_data=(x_validation, y_validation),
# verbose=0,
# shuffle=True)
# score = model.evaluate(x_validation, y_validation, verbose=0)
# print("Stratified %d-fold %s %s: %.2f%%" % (
# k, compound.split(".")[0], model.metrics_names[1], score[1] * 100))
# cross_validation_scores.append(score[1] * 100)
# model.save(filepath="../Results/Classification/%s.h5" % compound.split(".")[0])
# np.savetxt(fname=logger_path + log_name, X=np.array(cross_validation_scores), delimiter=',')
# plt.plot(cross_validation_scores, label="%d-fold cross validation")
# result = pd.read_csv(logger_path, delimiter=',')
# plt.xlabel("Folds")
# plt.ylabel("Accuracy")
# plt.xticks([i for i in range(0, n_epochs + 5, 5)], rotation=90)
# plt.yticks(np.arange(0, 1.05, 0.05).tolist())
# plt.title(compound.split(".")[0])
# plt.grid()
# plt.legend(loc="upper right")
# plt.savefig("../Results/Classification/images/%s.png" % compound.split(".")[0])
# plt.close("all")
print("Finished!")
def encode_labels(y_data, n_classes=2):
label_encoder = LabelEncoder()
y_data = label_encoder.fit_transform(y_data)
y_data = np.reshape(y_data, (-1, 1))
y_data = keras.utils.to_categorical(y_data, n_classes)
return y_data
def plot_results(path="../Results/Classification/"):
logs = os.listdir(path)
print(logs)
for log in logs:
if os.path.isfile(path + log) and log.endswith(".log"):
result =
|
pd.read_csv(path + log, delimiter=',')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import pandas as pd
import pyarrow as pa
# df = pd.DataFrame()
# Sample Data
raw_data = {'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'last_name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, 2, 3],
'postTestScore': [25, 94, 57, 62, 70]}
# Create Dataframe for the Sample Data
df =
|
pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'preTestScore', 'postTestScore'])
|
pandas.DataFrame
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
import os
import pandas as pd
import pdb
import numpy as np
import math
import pickle
import random
from sklearn.utils import shuffle
class FinalTCGAPCAWG(Dataset):
def __init__(self, dataset_name = None,
data_dir=None,
mode='training',
curr_fold=1,
block_size=5000,
load=False,
addtriplettoken=False,
addpostoken=False,
addgestoken=False,
addrt=False,
nummut = 0,
frac = 0,
crossdata=False,
crossdatadir=None,
pcawg2tgca_class=False,
tcga2pcawg_class=False,
mutratio = '1-0-0-0-0-0',
adddatadir = None):
self.dataset_name = dataset_name
self.data_dir=data_dir
self.mode=mode
self.curr_fold=int(curr_fold)
self.block_size=block_size
self.load=load
self.addtriplettoken=addtriplettoken
self.addpostoken=addpostoken
self.addrt=addrt
self.nummut = nummut
self.frac = frac
self.addgestoken = addgestoken
self.crossdata= crossdata
self.crossdatadir = crossdatadir
self.adddatadir = adddatadir
self.pcawg2tgca_class=pcawg2tgca_class
self.tcga2pcawg_class=tcga2pcawg_class
self.NiSi = False
self.SNV = False
self.indel = False
self.SVMEI = False
self.Normal = False
if self.nummut > 0 :
self.block_size = self.nummut
if self.dataset_name == 'finalpcawg':
self.training_fold = pd.read_csv('./notebookpcawg/pcawg_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/pcawg_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'finaltcga':
self.training_fold = pd.read_csv('./notebookpcawg/tcga_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/tcga_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'westcga':
self.training_fold = pd.read_csv('./notebookpcawg/tcgawes_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/tcgawes_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'wgspcawg':
self.training_fold = pd.read_csv('./notebookpcawg/pcawgwgs_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/pcawgwgs_valfold' + str(self.curr_fold) + '.csv',index_col=0)
if self.adddatadir is not None:
adddata = pd.DataFrame(columns=self.validation_fold.columns)
adddata.columns = self.validation_fold.columns
folder = os.listdir(self.adddatadir)
for i in folder:
samples = os.listdir(self.adddatadir + i )
for j in samples:
if j[0:3] == 'new':
counter = pd.read_csv(self.adddatadir + i + '/count_new_' + j[4:],index_col=0)
listall = [i,j[4:]] + counter['0'].values.tolist() + [1]
pds = pd.DataFrame(listall)
pds = pds.T
pds.columns=self.validation_fold.columns
adddata = adddata.append(pds)
adddata = adddata.reset_index(drop=True)
self.adddata = adddata
#self.validation_fold = self.validation_fold.append(self.adddata)
self.validation_fold = self.adddata
self.data_dir = self.adddatadir
self.load_classinfo()
self.vocab_mutation = pd.read_csv('./notebookpcawg/dictMutation.csv',index_col=0)
self.allSNV_index = 0
self.mutratio = mutratio.split('-')
self.mutratio = [float(i) for i in self.mutratio]
if self.mutratio[0]>0:
self.NiSi = True
if self.mutratio[1]>0:
self.SNV = True
if self.mutratio[2]>0:
self.indel = True
if self.mutratio[3]>0:
self.SVMEI = True
if self.mutratio[4]>0:
self.Normal = True
if self.NiSi:
vocabsize = len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='NiSi'])
if self.SNV:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='SNV'])
if self.indel:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='indel'])
if self.SVMEI:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ'].isin(['MEI','SV'])])
if self.Normal:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='Normal'])
self.vocab_size = vocabsize + 1
#print(self.vocab_size)
#pdb.set_trace()
self.pd_position_vocab = pd.read_csv('./notebookpcawg/dictChpos.csv',index_col=0)
self.pd_ges_vocab = pd.read_csv('./notebookpcawg/dictGES.csv',index_col=0)
self.position_size = len(self.pd_position_vocab) + 1
self.ges_size = len(self.pd_ges_vocab) + 1
self.rt_size = 1
self.midstring = '.' + self.dataset_name + str(mutratio) + str(int(self.addtriplettoken)) + str(int(self.addpostoken)) + str(int(self.addgestoken)) + str(int(self.addrt)) + '/'
if self.mode == 'validation':
if self.crossdata:
os.makedirs(self.crossdatadir + self.midstring, exist_ok=True)
self.data_dir = self.crossdatadir
#pdb.set_trace()
else:
os.makedirs(self.data_dir + self.midstring, exist_ok=True)
def load_classinfo(self):
if self.dataset_name == 'finalpcawg':
num_class = os.listdir(self.data_dir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.data_dir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
else:
num_class = os.listdir(self.data_dir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.data_dir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
if self.crossdata:
self.crossdatadir = self.data_dir
num_class = os.listdir(self.crossdatadir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.crossdatadir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_infoto = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
self.pd_class_crossdata = pd.read_csv('./extfile/crossdata.csv',index_col =0)
#pdb.set_trace()
def get_data(self,idx):
if self.mode=='training':
instances=self.training_fold.iloc[idx]
elif self.mode=='validation':
instances=self.validation_fold.iloc[idx]
elif self.mode == 'testing':
instances=self.test_data.iloc[idx]
#if self.prioritize:
# instances=self.training_fold.loc[self.training_fold['samples']=='f8593ac0-9480-22a0-e040-11ac0d48697a.csv']
# instances=instances.iloc[0]
target_name = instances['nm_class']
#if self.crossdata:
# target_name = self.pd_class_crossdata.loc[self.pd_class_crossdata['tcga_class']==target_name]['class_name'].to_list()[0]
samples = instances[1]
avail_count = np.asarray(self.mutratio) * self.block_size
row_count = instances[['NiSi','SNV','indel','SVMEI','Normal']].to_numpy()
diff = avail_count - row_count
pos = diff>0
avail_count1 = row_count * pos
diff = row_count > avail_count
avail_count2 = avail_count * diff
avail_count3 = avail_count1 + avail_count2
shadowavail_count3 = avail_count3
shadowavail_count3[0] = row_count[0]
if sum(shadowavail_count3) > self.block_size:
diff = self.block_size - sum(avail_count3)
shadowavail_count3[0] = diff + avail_count3[0]
avail_count2 = shadowavail_count3.astype(int)
if avail_count2[0]<0:
secondmax = avail_count2[np.argmax(avail_count2)]
avail_count2 = avail_count2 * 0.7
avail_count = avail_count2
diff = avail_count - row_count
pos = diff>0
avail_count1 = row_count * pos
diff = row_count > avail_count
avail_count2 = avail_count * diff
avail_count3 = avail_count1 + avail_count2
shadowavail_count3 = avail_count3
shadowavail_count3[0] = row_count[0]
if sum(shadowavail_count3) > self.block_size:
diff = self.block_size - sum(avail_count3)
shadowavail_count3[0] = diff + avail_count3[0]
avail_count2 = shadowavail_count3.astype(int)
avail_count = avail_count2
def grab(pd_input,grabcol):
return pd_input[grabcol]
def allgrab(grabcol):
if self.NiSi:
#pdb.set_trace()
pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'NiSi_new_' + samples,index_col=0)
pd_nisi = pd_nisi.sample(n = avail_count[0], replace = False)
pd_nisi = grab(pd_nisi,grabcol)
if self.SNV:
pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_new_' + samples,index_col=0)
pd_SNV = pd_SNV.sample(n = avail_count[1], replace = False)
pd_SNV = grab(pd_SNV,grabcol)
pd_nisi = pd_nisi.append(pd_SNV)
if self.indel:
pd_indel = pd.read_csv(self.data_dir + target_name + '/' + 'indel_new_' + samples,index_col=0)
pd_indel = pd_indel.sample(n = avail_count[2], replace = False)
pd_indel = grab(pd_indel,grabcol)
pd_nisi = pd_nisi.append(pd_indel)
if self.SVMEI:
pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_new_' + samples,index_col=0)
pd_meisv = pd_meisv.sample(n = avail_count[3], replace = False)
pd_meisv = grab(pd_meisv,grabcol)
pd_nisi = pd_nisi.append(pd_meisv)
if self.Normal:
pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Normal_new_' + samples,index_col=0)
pd_normal = pd_normal.sample(n = avail_count[4], replace = False)
pd_normal = grab(pd_normal,grabcol)
pd_nisi = pd_nisi.append(pd_normal)
pd_nisi = pd_nisi.fillna(0)
return pd_nisi
if self.addtriplettoken:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken'])
else:
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
pd_nisi = pd.read_csv(filename,index_col=0)
except:
pd_nisi = allgrab(['triplettoken'])
pd_nisi = pd_nisi.dropna()
pd_nisi.to_csv(filename)
else:
pd_nisi = allgrab(['triplettoken'])
pd_nisi.to_csv(filename)
#pdb.set_trace()
if self.addpostoken:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken','postoken'])
else:
#pdb.set_trace()
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
pd_nisi =
|
pd.read_csv(filename,index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 11:00:16 2020
@author: DWXMG
"""
# import importlib.util
# import sys
# from collections import namedtuple, OrderedDict
from pathlib import Path
import itertools
# from itertools import combinations
# import operator
import datetime as dt
import random
from math import pi
# import copy
# import pickle
import pandas as pd
import numpy as np
from cmath import phase
import matplotlib.pyplot as plt
from matplotlib.offsetbox import (
AnchoredOffsetbox,
DrawingArea,
HPacker,
TextArea,
) # TODO
from scipy import stats
from scipy.stats import linregress
from scipy.optimize import curve_fit
# from lmfit import Parameters, conf_interval, Minimizer, minimize
# import numdifftools
# import corner
from file_py_helper.file_functions import FileOperations
from .validation import get_KKvalid, prep_GP_DRT_raw_data
from .models import Model_Collection
from .plotting import (
plot_linKK,
EIS_Trimming_plot,
EIS_plotting_per_EV,
plot_lin_Warburg,
)
from .DRT_DP_fitting import DP_DRT_analysis
from .GP_DRT_fitting import run_GP_DRT_fit
# from .plotting import EIS_plotting_per_EV, EIS_Trimming_plot, EIS_plotting_EvRHE
# _logger = start_logging(__name__)
import logging
_logger = logging.getLogger(__name__)
# fit_export_templ = namedtuple('fit_export_templ', 'fit_spectra fit_pars meta_index')
# Meta = namedtuple('Meta', 'PAR_file Segment E_dc_RHE E_dc_RHE_mV RPM_DAC data ovv')
globals()["EvRHE"] = "E_AppV_RHE"
def func_lin(a):
def func(x, b):
return a * x + b
return func
def fitting_recheck_params(fit_run_arg, modname, params_model, **EIS_fit_kwargs):
# PF,E,RPM = str(fit_run_arg.PAR_file), fit_run_arg.E_dc_RHE, fit_run_arg.RPM_DAC
_key = (
str(fit_run_arg[0]),
int(fit_run_arg[1]),
*[float(i) for i in fit_run_arg[2:4]],
int(fit_run_arg[4]),
modname,
)
# (PF,E,RPM,modname)
_get_params = pd.DataFrame()
# ('/mnt/DATA/EKTS_CloudStation/CloudStation/Experimental data/Raw_data/VERSASTAT/2019-05-May/06.05.2019_0.1MH2SO4_cell2/O2_EIS-range_1500rpm_JOS2_288.par',
# 4, 0.708, 708.0, 1500, 'Model(Randles_RQRQ)')
# ['PAR_file',EvRHE,'RPM_DAC','Model_EEC']
bad_grp, good_grp = EIS_fit_kwargs.get("EIS_recheck_bad_fits"), EIS_fit_kwargs.get(
"EIS_recheck_good_fits"
)
sugg_grp = EIS_fit_kwargs.get("EIS_recheck_bad_fits_suggestions")
recheck_msg = ""
if all([len(i.groups) > 0 for i in [bad_grp, good_grp, sugg_grp]]):
if [i for i in good_grp.groups if _key == i]:
recheck_msg += "Prefit recheck in good keys"
_get_params = good_grp.get_group(_key)
elif [i for i in bad_grp.groups if _key == i]:
recheck_msg += f"Prefit recheck in bad keys {_key} and"
_sugg_match = [i for i in sugg_grp.groups if _key == i]
if _sugg_match:
recheck_msg += " taking suggestions."
_get_params = sugg_grp.get_group(_key)
else:
recheck_msg += " not in suggestions."
# _logger.warning(f'Prefit recheck bad key {_key} not in suggestions')
else:
recheck_msg += f"Prefit recheck keys not in good or bad {_key}"
else:
recheck_msg += f"Prefit recheck empty frames"
# _logger.warning(recheck_msg)
return _get_params, recheck_msg
#%%
def make_prefit_frame(
EIS_data_KKvalid,
lmfitting,
prefix="pp",
plot=False,
check_err=True,
norm=np.array([]),
get_frame=False,
):
# norm = 1/(Z_KKv.real**2+Z_KKv.imag**2)
# abs(Z_KKv)
# norm = 1
# norm = np.sqrt(EIS_data_KKvalid.DATA_weightsmod_Z.values)
# norm = 1/abs(Z_KKv)
# norm = 1/np.sqrt(EIS_data_KKvalid.DATA_weightsmod_Z.values)
# lmfitting = best_trial
# EIS_data_KKvalid, lmfitting = _spectrum.EIS_data_KKvalid,best_trial_weights # FIXME
# make_prefit_frame(EIS_data_KKvalid, out, plot = 'Y')
if norm.size == 0:  # no weighting supplied, fall back to unit weights
norm = np.array([1] * len(lmfitting.best_fit.real))
if "DataFrame" in type(EIS_data_KKvalid).__name__:
Z_KKv = EIS_data_KKvalid.DATA_Z.values
elif "array" in type(EIS_data_KKvalid).__name__:
Z_KKv = EIS_data_KKvalid
EIS_data_KKvalid =
|
pd.DataFrame(Z_KKv)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
def is_type(v, t):
return isinstance(v, t) or issubclass(type(v), t)
def list_of_instance(arr, type):
if isinstance(arr, (list, tuple, set)):
return all([is_type(v, type) for v in arr])
return False
def dict_of_instance(data, key_type, value_type):
if isinstance(data, dict):
return list_of_instance(list(data.keys()), key_type) and list_of_instance(list(data.values()), value_type)
return False
def cast_output_numpy(dtype, arr: np.ndarray):
if dtype == "integer":
return arr.astype(int)
elif dtype == "boolean":
return arr.astype(bool)
else:
return arr
def cast_output(dtype, value):
from stateful.event.event import Event
if isinstance(value, Event) and value.isna():
return np.NaN
elif not isinstance(value, Event) and pd.isna(value):
return np.NaN
elif isinstance(value, np.ndarray):
return cast_output_numpy(dtype, value)
elif dtype == "integer":
return int(value)
elif dtype == "boolean":
return bool(value)
else:
return value
def infer_dtype(value):
from pandas.api.types import infer_dtype as infer
return
|
infer([value])
|
pandas.api.types.infer_dtype
|
"""
script to extract all labels from xml files from CVAT.
Note: this script will break if the column names do not match the current ones;
the categorize_impacts function will need to be updated as more labels come in.
Author: @developmentseed
Run:
python3 xmls_to_df.py --xml_path=TA25 --csv_out=labeled_aiaia.csv
"""
import sys
import os
from os import path as op
import xml.etree.ElementTree as etree
import pandas as pd
import argparse
def parse_xml_attributes(xml):
"""parse xml and get all the attributes
Args:
xml: xml file contain bbox and label information
Returns:
attributes (list): a list of extracted attributes
"""
attributes = []
root= etree.parse(xml).getroot()
image_entries = root.findall('image')
for image in image_entries:
width = int(image.get('width'))
height = int(image.get('height'))
for bb in image.findall('box'):
image_id = image.get('name')
label= bb.get('label')
bbox = [float(bb.get(coord_key)) for coord_key in [ 'xtl', 'ytl', 'xbr', 'ybr']]
attributes.append([image_id, label, bbox])
return attributes
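# Usage sketch (the file name below is a hypothetical example; the real annotation
# xmls come from the --xml_path directory passed on the command line):
def _example_parse_xml_attributes(xml_file='TA25/annotations.xml'):
    attrs = parse_xml_attributes(xml_file)
    # each entry looks like [image_id, label, [xtl, ytl, xbr, ybr]]
    return attrs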
def dataframe_attributes(attributes, columns=None):
"""format attributes into pandas dataframe with column nane
Args:
attributes(list): a list of attributes
columns: column names to be written in the dataframe
"""
df = pd.DataFrame(attributes, columns=columns)
return df
def df_all_attributes(xmls, columns = ['image_id', 'label','bbox']):
"""format all attribute to a collective padas dataframe
Args:
xmls(xml): xml files
columns: pandas dataframe column names
Returns:
df_all: pandas dataframe with the saved attributes in designated columns
"""
dfs = []
for xml in xmls:
attris = parse_xml_attributes(xml)
df = dataframe_attributes(attris, columns=columns)
dfs.append(df)
df_all =
|
pd.concat(dfs)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[4]:
train.shape
# In[5]:
test.shape
# In[6]:
y_train = train['ACTION']
# In[7]:
y_train.shape
# In[8]:
train_data = train.drop('ACTION', axis=1)
train_data.shape
# In[9]:
test_data = test.drop('id', axis=1)
test_data.shape
# ## Common Variables
# In[10]:
# define variables
random_state = 42
cv = 5
scoring = 'roc_auc'
verbose=2
# ## Common functions
# In[11]:
def save_submission(predictions, filename):
'''
Save predictions into csv file
'''
global test
submission = pd.DataFrame()
submission["Id"] = test["id"]
submission["ACTION"] = predictions
filepath = "result/sampleSubmission_"+filename
submission.to_csv(filepath, index = False)
# In[12]:
def print_graph(results, param1, param2, xlabel, ylabel, title='Plot showing the ROC_AUC score for various hyper parameter values'):
'''
Plot the graph
'''
plt.plot(results[param1],results[param2]);
plt.grid();
plt.xlabel(xlabel);
plt.ylabel(ylabel);
plt.title(title);
# In[13]:
def get_rf_params():
'''
Return dictionary of parameters for random forest
'''
params = {
'n_estimators':[10,20,50,100,200,500,700,1000],
'max_depth':[1,2,5,10,12,15,20,25],
'max_features':[1,2,3,4,5],
'min_samples_split':[2,5,7,10,20]
}
return params
# In[14]:
def get_xgb_params():
'''
Return dictionary of parameters for xgboost
'''
params = {
'n_estimators': [10,20,50,100,200,500,750,1000],
'learning_rate': uniform(0.01, 0.6),
'subsample': uniform(),
'max_depth': [3, 4, 5, 6, 7, 8, 9],
'colsample_bytree': uniform(),
'min_child_weight': [1, 2, 3, 4]
}
return params
# ### We will try the following models
#
# 1. KNN
# 2. SVM
# 3. Logistic Regression
# 4. Random Forest
# 5. Xgboost
# ## Build Models on the raw data
# ## 1.1 KNN with raw features
# In[15]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[16]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[17]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[18]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[19]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_data,y_train)
# In[20]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, "knn_raw.csv")
# 
# ## 1.2 SVM with raw feature
# In[21]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[22]:
best_c=best_model.best_params_['C']
best_c
# In[23]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[24]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[25]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_data,y_train)
# In[26]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'svm_raw.csv')
# 
# ## 1.3 Logistic Regression with Raw Feature
# In[27]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[28]:
best_c=best_model.best_params_['C']
best_c
# In[29]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[30]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[31]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_data,y_train)
# In[32]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'lr_raw.csv')
# 
# ## 1.4 Random Forest with Raw Feature
# In[33]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[34]:
results =
|
pd.DataFrame(best_model.cv_results_)
|
pandas.DataFrame
|
import re
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup, Comment
from profootballref.Tools import Loader
class PlayerParser:
def __init__(self):
pass
def parse_general_info(self, html):
# initialize a dict to hold stats/metrics not contained in tabular form
general_stats = {}
# scrape data off the players page not contained in the stats table
general_stats['name'] = re.compile('<h1 itemprop="name">\\n\\t\\t<span>(.*)<\/span>').findall(html)[0]
try:
general_stats['position'] = re.compile('<strong>Position<\/strong>:\W([a-zA-Z]{1,})').findall(html)[0]
except:
general_stats['position'] = np.nan
try:
general_stats['throws'] = re.compile('<strong>Throws:<\/strong>\\n\\t\\t(.*?)\\n\\t\\n<\/p>').findall(html)[0]
except:
general_stats['throws'] = np.nan
# convert height to inches
height = re.compile('<span itemprop="height">(.*?)<\/span>').findall(html)[0]
general_stats['height'] = int(height.split('-')[0]) * 12 + int(height.split('-')[1])
general_stats['weight'] = int(re.compile('<span itemprop="weight">([0-9]{1,3})lb<\/span>').findall(html)[0])
# break up DOB into separate cols for m/d/y
bday = re.compile('<span itemprop="birthDate" id="necro-birth" data-birth="(.*?)">').findall(html)[0]
general_stats['bday_mo'] = int(bday.split('-')[1])
general_stats['bday_day'] = int(bday.split('-')[2])
general_stats['bday_yr'] = int(bday.split('-')[0])
try:
general_stats['college'] = re.compile('<a href="\/schools\/\w+.*?\/">(.*?)<\/a>').findall(html)[0]
except:
general_stats['college'] = np.nan
return general_stats
def receiving(self, url=None, **kwargs):
# We generally pass in a url and then load the page, for testing the function allow html to be passed in
if url:
response = Loader.Loader().load_page(url)
html = response.text
else:
for k, v in kwargs.items():
if k == 'html':
html = v
#Scrape general stats
general_stats = self.parse_general_info(html)
# Here we test to see if the player page being called is for a receiver or running back. Since the dataframe
# structure is the same for both positions, we'll call one or the other. If the position is anything else, we
# won't try to parse it
parseablePositions = ['TE', 'WR']
if not any(x in general_stats['position'] for x in parseablePositions):
if any(x in general_stats['position'] for x in ['RB', 'FB']):
print(url, " is a ", general_stats['position'], " calling rushing method instead")
df = self.rushing(url)
else:
print(url, " is not a receiver we can parse so we're skipping this player")
return pd.DataFrame()
else:
# load the stats table into pandas dataframe. Using 'df' as the variable name to signify it's a pd.DataFrame.
df = pd.read_html(html)[0]
df = df.iloc[:,:27]
# rename columns from the original multirow columns
cols = ['Year', 'Age', 'Tm', 'Pos', 'No', 'G', 'GS', 'Tgt', 'Rec', 'Rec_Yds', 'Y/R', 'Rec_TD', 'Rec_Lng',
'R/G', 'Rec_Y/G', 'Ctch%', 'Rush', 'Rush_Yds', 'Rush_TD', 'Rush_Lng', 'Y/A', 'Rush_Y/G', 'A/G',
'YScm', 'RRTD', 'Fmb', 'AV']
try:
df.columns = cols
except ValueError:
print('Column mismatch, check url: ', url, 'skipping and returning blank DF')
return pd.DataFrame()
# remove the career totals row
df['Age'] = pd.to_numeric(df['Age'], errors='coerce')
df = df[~df['Age'].isna()]
# remove special characters that are sometimes added to the year to indicate Pro Bowl, All-Pro etc
df['Year'] = df['Year'].str.replace('+', '')
df['Year'] = df['Year'].str.replace('*', '')
# some players have multiple rows w.o a year if they played on more than 1 team in that year
df['Year'] = df['Year'].astype(str)
df = df[df.Year != 'nan']
df['Year'] = pd.to_numeric(df['Year'])
# sometimes this field is blank, so we convert the nan to an empty string so we can parse further
df['Ctch%'] = df['Ctch%'].astype(str)
df['Ctch%'] = df['Ctch%'].fillna('')
# remove % sign on ctch% and convert to float
df['Ctch%'] = df['Ctch%'].str.replace('%', '')
df['Ctch%'] = pd.to_numeric(df['Ctch%'], errors='coerce')
# uppercase some qualitatives
df['Tm'] = df['Tm'].str.upper()
# Insert general scraped info from player page
df['Name'] = general_stats['name']
df['Throws'] = general_stats['throws']
df['Height'] = general_stats['height']
df['Weight'] = general_stats['weight']
df['DOB_mo'] = general_stats['bday_mo']
df['DOB_day'] = general_stats['bday_day']
df['DOB_yr'] = general_stats['bday_yr']
df['College'] = general_stats['college']
# This is hacky but position info isn't always contained in every row
if df['Pos'].isnull().values.any():
df['Pos'] = general_stats['position']
df['Pos'] = df['Pos'].str.upper()
# rearrange the dataframe columns, this is personal preference
df = df[['Name', 'Year', 'Age', 'Throws', 'Height', 'Weight', 'DOB_mo', 'DOB_day', 'DOB_yr', 'College',
'Tm', 'Pos', 'No', 'G', 'GS', 'Tgt', 'Rec', 'Rec_Yds', 'Y/R', 'Rec_TD', 'Rec_Lng', 'R/G',
'Rec_Y/G', 'Ctch%', 'Rush', 'Rush_Yds', 'Rush_TD', 'Rush_Lng', 'Y/A', 'Rush_Y/G', 'A/G', 'YScm',
'RRTD', 'Fmb', 'AV']]
return df
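# Usage sketch (placeholder URL shown for illustration only):
#   df = PlayerParser().receiving("https://www.pro-football-reference.com/players/<player-page>")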
def rushing(self, url=None, **kwargs):
# We generally pass in a url and then load the page, for testing the function allow html to be passed in
if url:
response = Loader.Loader().load_page(url)
html = response.text
else:
for k, v in kwargs.items():
if k == 'html':
html = v
# Scrape general stats
general_stats = self.parse_general_info(html)
# Here we test to see if the player page being called is for a running back or a receiver. Since the dataframe
# structure is the same for both positions, we'll call one or the other. If the position is anything else, we
# won't try to parse it
parseablePositions = ['RB', 'FB']
if not any(x in general_stats['position'] for x in parseablePositions):
if any(x in general_stats['position'] for x in ['WR', 'TE']):
print(url, " is a ", general_stats['position'], " calling receiving method instead")
df = self.receiving(url)
else:
print(url, " is not a receiver we can parse so we're skipping this player")
return pd.DataFrame()
#return pd.DataFrame()
else:
# load the stats table into pandas dataframe. Using 'df' as the variable name to signify it's a pd.DataFrame.
df = pd.read_html(html)[0]
# drop a bunch of unused columns
df = df.iloc[:, 0:27]
# rename columns from the original multirow columns
cols = ['Year', 'Age', 'Tm', 'Pos', 'No', 'G', 'GS', 'Rush', 'Rush_Yds', 'Rush_TD', 'Rush_Lng', 'Y/A',
'Rush_Y/G', 'A/G', 'Tgt', 'Rec', 'Rec_Yds', 'Y/R', 'Rec_TD', 'Rec_Lng', 'R/G', 'Rec_Y/G', 'Ctch%',
'YScm', 'RRTD', 'Fmb', 'AV']
try:
df.columns = cols
except ValueError:
print('Column mismatch, check url: ', url, 'skipping and returning blank DF')
return pd.DataFrame()
# remove the career totals row
df['Age'] = pd.to_numeric(df['Age'], errors='coerce')
df = df[~df['Age'].isna()]
# remove special characters that are sometimes added to the year to indicate Pro Bowl, All-Pro etc
df['Year'] = df['Year'].str.replace('+', '')
df['Year'] = df['Year'].str.replace('*', '')
# some players have multiple rows w.o a year if they played on more than 1 team in that year
df['Year'] = df['Year'].astype(str)
df = df[df.Year != 'nan']
df['Year'] = pd.to_numeric(df['Year'])
# sometimes this field is blank, so we convert the nan to an empty string so we can parse further
df['Ctch%'] = df['Ctch%'].astype(str)
df['Ctch%'] = df['Ctch%'].fillna('')
# remove % sign on ctch% and convert to float
df['Ctch%'] = df['Ctch%'].str.replace('%', '')
df['Ctch%'] = pd.to_numeric(df['Ctch%'], errors='coerce')
# uppercase some qualitatives
df['Tm'] = df['Tm'].str.upper()
# Insert general scraped info from player page
df['Name'] = general_stats['name']
df['Throws'] = general_stats['throws']
df['Height'] = general_stats['height']
df['Weight'] = general_stats['weight']
df['DOB_mo'] = general_stats['bday_mo']
df['DOB_day'] = general_stats['bday_day']
df['DOB_yr'] = general_stats['bday_yr']
df['College'] = general_stats['college']
# This is hacky but position info isn't always contained in every row
if df['Pos'].isnull().values.any():
df['Pos'] = general_stats['position']
df['Pos'] = df['Pos'].str.upper()
# rearrange the dataframe columns, this is personal preference
df = df[['Name', 'Year', 'Age', 'Throws', 'Height', 'Weight', 'DOB_mo', 'DOB_day', 'DOB_yr', 'College',
'Tm', 'Pos', 'No', 'G', 'GS', 'Tgt', 'Rec', 'Rec_Yds', 'Y/R', 'Rec_TD', 'Rec_Lng', 'R/G',
'Rec_Y/G', 'Ctch%', 'Rush', 'Rush_Yds', 'Rush_TD', 'Rush_Lng', 'Y/A', 'Rush_Y/G', 'A/G', 'YScm',
'RRTD', 'Fmb', 'AV']]
return df
def passing(self, url=None, **kwargs):
# We generally pass in a url and then load the page, for testing the function allow html to be passed in
if url:
response = Loader.Loader().load_page(url)
html = response.text
else:
for k, v in kwargs.items():
if k == 'html':
html = v
# Scrape general stats
general_stats = self.parse_general_info(html)
# Ensure we're only parsing QB's
parseablePositions = ['QB']
if not any(x in general_stats['position'] for x in parseablePositions):
print(url, " is not a quarterback we can parse so we're skipping this player")
return pd.DataFrame()
else:
# load the stats table into pandas dataframe. Using 'df' as the variable name to signify it's a
# pd.DataFrame.
df = pd.read_html(html)[0]
# remove the career totals row
df['Age'] = pd.to_numeric(df['Age'], errors='coerce')
df = df[~df['Age'].isna()]
# remove special characters that are sometimes added to the year to indicate Pro Bowl, All-Pro etc
df['Year'] = df['Year'].str.replace('+', '')
df['Year'] = df['Year'].str.replace('*', '')
# some players have multiple rows w.o a year if they played on more than 1 team in that year
df = df[df.Year != 'nan']
df['Year'] = pd.to_numeric(df['Year'])
df['GS'] = pd.to_numeric(df['GS'])
# Insert general scraped info from player page
df['Pos'] = general_stats['position']
df['Name'] = general_stats['name']
df['Throws'] = general_stats['throws']
df['Height'] = general_stats['height']
df['Weight'] = general_stats['weight']
df['DOB_mo'] = general_stats['bday_mo']
df['DOB_day'] = general_stats['bday_day']
df['DOB_yr'] = general_stats['bday_yr']
df['College'] = general_stats['college']
# uppercase some qualitatives
df['Tm'] = df['Tm'].str.upper()
df['Pos'] = df['Pos'].str.upper()
# Parse out rushing and receiving information and append to the passing info
soup = BeautifulSoup(html, 'lxml')
# parse out the chunk of rushing and receiving info from the html comments
rush_cols = ['Year', 'Age', 'Tm', 'Pos', 'No.', 'G', 'GS', 'Rush', 'Rush_Yds', 'Rush_TD',
'Rush_Lng', 'Rush_Y/A', 'Rush_Y/G', 'A/G', 'Tgt', 'Rec', 'Rec_Yds', 'Y/R', 'Rec_TD', 'Rec_Lng',
'R/G', 'Rec_Y/G', 'Ctch%', 'YScm', 'RRTD', 'Fmb']
# we need to keep track of whether we actually found rushing info
found = False
# Rushing info for QBs is commented out unless JavaScript is enabled, so search the comments
for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
if 'id="div_rushing_and_receiving">' in comment:
new_html = comment
rush_df = pd.read_html(new_html)[0]
rush_df = rush_df.iloc[:, :26]
try:
rush_df.columns = rush_cols
except ValueError:
print('Column mismatch, check url: ', url)
# munge the columns similar to above
# remove the career totals row
rush_df['Age'] = pd.to_numeric(rush_df['Age'], errors='coerce')
rush_df = rush_df[~rush_df['Age'].isna()]
# remove special characters that are sometimes added to the year to indicate Pro Bowl, All-Pro etc
rush_df['Year'] = rush_df['Year'].str.replace('+', '')
rush_df['Year'] = rush_df['Year'].str.replace('*', '')
# some players have multiple rows w.o a year if they played on more than 1 team in that year
rush_df = rush_df[rush_df.Year != 'nan']
rush_df['Year'] = pd.to_numeric(rush_df['Year'])
# This is hacky but position info isn't always contained in every row
rush_df['Pos'] = general_stats['position']
# uppercase some qualitatives
rush_df['Tm'] = rush_df['Tm'].str.upper()
rush_df['Pos'] = rush_df['Pos'].str.upper()
# Ensure that we know we have the rushing info we're looking for
found = True
# if we didn't get any rushing info, create an empty df
if not found:
rush_df = pd.DataFrame(columns=rush_cols)
# merge the two DataFrames on overlapping columns and return
combined_df =
|
pd.merge(df, rush_df, on=['Year', 'Age', 'Tm', 'Pos', 'No.', 'G', 'GS'], how='left')
|
pandas.merge
|
from io import StringIO, BytesIO
import base64
import pandas as pd
from os.path import join, dirname
population_colors = pd.read_csv(join(dirname(__file__), '../data/colors.csv')) # TODO add more colors
def extract_file(file_source, typ='txt'):
filename = file_source.data['file_name'][0]
raw_contents = file_source.data['file_contents'][0]
# remove the prefix that JS adds
prefix, b64_contents = raw_contents.split(",", 1)
file_contents = base64.b64decode(b64_contents)
file_io = StringIO(bytes.decode(file_contents)) if typ == 'txt' else BytesIO(file_contents)
return file_io, filename
def file_callback_tree(file_source, dropdown_value, viz_df, tree, main_source): # TODO file check
file_io, filename = extract_file(file_source)
df = pd.read_csv(file_io)
if dropdown_value == 'coordinates':
tree['coordinates'] = df
viz_df['x'] = tree['coordinates'].iloc[:, 1].values
viz_df['y'] = tree['coordinates'].iloc[:, 2].values
viz_df['populationID'] = -1
main_source.data = viz_df.to_dict(orient='list')
elif dropdown_value == 'edges':
tree['edges'] = df
return viz_df, tree
def file_callback_populations(file_source, viz_df, main_source): # TODO file check
file_io, filename = extract_file(file_source)
text = list(iter(file_io.getvalue().splitlines()))
viz_df['populationID'] = -1
pops =
|
pd.DataFrame()
|
pandas.DataFrame
|
import re
import config
import pandas as pd
from pandas.core.frame import DataFrame
from io import BytesIO
import matplotlib.pyplot as plt
from utils.LokiLogger import Logger
from utils.RequestsHelper import getRequestWrapper
from utils.Exceptions import EmptyHTTPResponseException
class AlphaVantage:
def __init__(self, api_key: str, request_interval: int):
self.api_key = api_key
self.request_interval = request_interval
self.base_url = config.alphavantage
self.logging = Logger.getLogger(__name__)
def getQuote(self, symbol: str) -> DataFrame:
params = {
"function" : "GLOBAL_QUOTE"
, "symbol" : symbol
, "apikey" : self.api_key
, "datatype" : "csv"
}
msg = f"getQuote() failed for symbol {symbol}"
response = getRequestWrapper(logging=self.logging, url=self.base_url, params=params, msg=msg)
df = pd.read_csv(BytesIO(response.content))
if df is None or len(df.index) == 0:
raise EmptyHTTPResponseException("getQuote() returned empty, please validate request")
if "Error Message" in df.iloc[0][0]:
raise EmptyHTTPResponseException("getQuote() invalid API call, please validate request")
return df
def getEarnings(self, symbol: str) -> DataFrame:
params = {
"function" : "EARNINGS_CALENDAR"
, "symbol" : symbol
, "apikey" : self.api_key
, "datatype" : "csv"
}
msg = f"getEarnings() failed for symbol {symbol}"
response = getRequestWrapper(logging=self.logging, url=self.base_url, params=params, msg=msg)
df = pd.read_csv(BytesIO(response.content))
if df is None or len(df.index) == 0:
raise EmptyHTTPResponseException(f"getEarnings() returned empty for {symbol}, please validate request")
return df
def getUpcomingIPOs(self) -> DataFrame:
params = {
"function" : "IPO_CALENDAR"
, "apikey" : self.api_key
}
msg = "getUpcomingIPOs() failed to fetch IPOs"
response = getRequestWrapper(logging=self.logging, url=self.base_url, params=params, msg=msg)
df = pd.read_csv(BytesIO(response.content))
if df is None or len(df.index) == 0:
raise EmptyHTTPResponseException("getUpcomingIPOs() returned empty, please validate request")
df[df.columns[1]] = df[df.columns[1]].apply(lambda x: x[:46] + "..." if len(x) > 47 else x) # column: name
return df
def getFXRate(self, from_ccy: str, to_ccy: str) -> DataFrame:
params = {
"function" : "CURRENCY_EXCHANGE_RATE"
, "from_currency" : from_ccy
, "to_currency" : to_ccy
, "apikey" : self.api_key
}
msg = f"getFXRate() failed to fetch conversion from {from_ccy} to {to_ccy}"
response = getRequestWrapper(logging=self.logging, url=self.base_url, params=params, msg=msg)
data = response.json()
data = data.get("Realtime Currency Exchange Rate", None)
if data is None:
print(response.text)
raise EmptyHTTPResponseException(f"getFXRate() errored for {from_ccy} to {to_ccy} rate, please validate request")
df =
|
DataFrame.from_dict(data, orient="index")
|
pandas.core.frame.DataFrame.from_dict
|
# -*- coding: utf-8 -*-
'''
Ensemble learning
'''
# Import libraries
import pandas as pd
import numpy as np
from scipy import stats
# Load the prediction data
lgb_sub = pd.read_csv('./submit/bank_LightGBM.csv', sep=',', header=None)
xgb_sub = pd.read_csv('./submit/bank_xgboost.csv', sep=',', header=None)
rf_sub = pd.read_csv('./submit/bank_rf.csv', sep=',', header=None)
# Concatenate the predictions
df = pd.concat([lgb_sub[1],
xgb_sub[1],
rf_sub[1]
],
axis=1)
# Ensemble (simple average)
pred = df.mean(axis=1)
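# Alternative sketch (an assumption, not in the original script): if the three files
# held hard class labels rather than probabilities, a majority vote via
# scipy.stats.mode could replace the mean, e.g.
#   pred = stats.mode(df.values, axis=1)[0].ravel()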
'''
Submission
'''
# Load the submission template
sub =
|
pd.read_csv('./data/submit_sample.csv', sep=',', header=None)
|
pandas.read_csv
|
from urllib.request import urlopen
from urllib.error import HTTPError
import json
import pandas as pd
def key_metrics(ticker, api_key, period="annual", TTM=False):
"""
Description
----
Gives information about key metrics of a company over time which includes
i.a. PE ratio, Debt to Equity, Dividend Yield and Average Inventory.
Input
----
ticker (string)
The company ticker (for example: "NFLX")
api_key (string)
The API Key obtained from https://financialmodelingprep.com/developer/docs/
period (string)
Data period, this can be "annual" or "quarter".
TTM (boolean)
Obtain the trailing twelve months (TTM) key metrics.
Output
----
data (dataframe)
Data with variables in rows and the period in columns.
"""
if TTM:
URL = f"https://financialmodelingprep.com/api/v3/key-metrics-ttm/{ticker}?apikey={api_key}"
else:
URL = f"https://financialmodelingprep.com/api/v3/key-metrics/{ticker}?period={period}&apikey={api_key}"
try:
response = urlopen(URL)
data = json.loads(response.read().decode("utf-8"))
except HTTPError:
raise ValueError("This endpoint is only for premium members. Please visit the subscription page to upgrade the "
"plan (Starter or higher) at https://financialmodelingprep.com/developer/docs/pricing")
if 'Error Message' in data:
raise ValueError(data['Error Message'])
if TTM:
data_formatted = pd.Series(data[0])
else:
data_formatted = {}
for value in data:
if period == "quarter":
date = value['date'][:7]
else:
date = value['date'][:4]
del value['date']
del value['symbol']
data_formatted[date] = value
data_formatted = pd.DataFrame(data_formatted)
return data_formatted
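# Usage sketch (not part of the original module); requires your own
# financialmodelingprep.com API key, and "NFLX" mirrors the docstring example:
def _example_key_metrics_usage(api_key="YOUR_API_KEY"):
    quarterly = key_metrics("NFLX", api_key, period="quarter")
    ttm = key_metrics("NFLX", api_key, TTM=True)
    return quarterly, ttm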
def financial_ratios(ticker, api_key, period="annual", TTM=False):
"""
Description
----
Gives information about the financial ratios of a company over time
which includes i.a. investment, liquidity, profitability and debt ratios.
Input
----
ticker (string)
The company ticker (for example: "LYFT")
api_key (string)
The API Key obtained from https://financialmodelingprep.com/developer/docs/
period (string)
Data period, this can be "annual" or "quarter".
TTM (boolean)
Obtain the trailing twelve months (TTM) ratios.
Output
----
data (dataframe or series)
Data with variables in rows and the period in columns.
"""
if TTM:
URL = f"https://financialmodelingprep.com/api/v3/ratios-ttm/{ticker}?apikey={api_key}"
else:
URL = f"https://financialmodelingprep.com/api/v3/ratios/{ticker}?period={period}&apikey={api_key}"
try:
response = urlopen(URL)
data = json.loads(response.read().decode("utf-8"))
except HTTPError:
raise ValueError("This endpoint is only for premium members. Please visit the subscription page to upgrade the "
"plan (Starter or higher) at https://financialmodelingprep.com/developer/docs/pricing")
if 'Error Message' in data:
raise ValueError(data['Error Message'])
if TTM:
data_formatted = pd.Series(data[0])
else:
data_formatted = {}
for value in data:
if period == "quarter":
date = value['date'][:7]
else:
date = value['date'][:4]
del value['date']
del value['symbol']
data_formatted[date] = value
data_formatted =
|
pd.DataFrame(data_formatted)
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2022/5/12 15:15
Desc: Eastmoney individual stock popularity ranking
http://guba.eastmoney.com/rank/
"""
import requests
import pandas as pd
def stock_hot_rank_em() -> pd.DataFrame:
"""
Eastmoney - individual stock popularity ranking
http://guba.eastmoney.com/rank/
:return: popularity ranking
:rtype: pandas.DataFrame
"""
url = "https://emappdata.eastmoney.com/stockrank/getAllCurrentList"
payload = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"pageNo": 1,
"pageSize": 100,
}
r = requests.post(url, json=payload)
data_json = r.json()
temp_rank_df = pd.DataFrame(data_json["data"])
temp_rank_df["mark"] = [
"0" + "." + item[2:] if "SZ" in item else "1" + "." + item[2:]
for item in temp_rank_df["sc"]
]
",".join(temp_rank_df["mark"]) + "?v=08926209912590994"
params = {
"ut": "f057cbcbce2a86e2866ab8877db1d059",
"fltt": "2",
"invt": "2",
"fields": "f14,f3,f12,f2",
"secids": ",".join(temp_rank_df["mark"]) + ",?v=08926209912590994",
}
url = "https://push2.eastmoney.com/api/qt/ulist.np/get"
r = requests.get(url, params=params)
data_json = r.json()
temp_df =
|
pd.DataFrame(data_json["data"]["diff"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
|
assert_frame_equal(unstacked_cols_df['bar'].T, df)
|
pandas.util.testing.assert_frame_equal
|
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from numpy.testing import assert_allclose
from pandas.util.testing import assert_series_equal
from windpowerlib.power_output import (
power_coefficient_curve,
power_curve,
power_curve_density_correction,
)
class TestPowerOutput:
def test_power_coefficient_curve(self):
parameters = {
"wind_speed": pd.Series(data=[2.0, 5.5, 7.0]),
"density": pd.Series(data=[1.3, 1.3, 1.3]),
"rotor_diameter": 80,
"power_coefficient_curve_wind_speeds": pd.Series([4.0, 5.0, 6.0]),
"power_coefficient_curve_values": pd.Series([0.3, 0.4, 0.5]),
}
# Test wind_speed as pd.Series with density and power_coefficient_curve
# as pd.Series and np.array
power_output_exp = pd.Series(
data=[0.0, 244615.399, 0.0], name="feedin_power_plant"
)
assert_series_equal(
power_coefficient_curve(**parameters), power_output_exp
)
parameters["density"] = np.array(parameters["density"])
assert_series_equal(
power_coefficient_curve(**parameters), power_output_exp
)
parameters["power_coefficient_curve_values"] = np.array(
parameters["power_coefficient_curve_values"]
)
parameters["power_coefficient_curve_wind_speeds"] = np.array(
parameters["power_coefficient_curve_wind_speeds"]
)
assert_series_equal(
power_coefficient_curve(**parameters), power_output_exp
)
# Test wind_speed as np.array with density and power_coefficient_curve
# as np.array and pd.Series
power_output_exp = np.array([0.0, 244615.399, 0.0])
parameters["wind_speed"] = np.array(parameters["wind_speed"])
assert_allclose(
power_coefficient_curve(**parameters), power_output_exp
)
assert isinstance(power_coefficient_curve(**parameters), np.ndarray)
parameters["density"] = pd.Series(data=parameters["density"])
assert_allclose(
power_coefficient_curve(**parameters), power_output_exp
)
assert isinstance(power_coefficient_curve(**parameters), np.ndarray)
parameters["power_coefficient_curve_wind_speeds"] = pd.Series(
data=parameters["power_coefficient_curve_wind_speeds"]
)
parameters["power_coefficient_curve_values"] = pd.Series(
data=parameters["power_coefficient_curve_values"]
)
assert_allclose(
power_coefficient_curve(**parameters), power_output_exp
)
assert isinstance(power_coefficient_curve(**parameters), np.ndarray)
def test_power_curve(self):
parameters = {
"wind_speed": pd.Series(data=[2.0, 5.5, 7.0]),
"density":
|
pd.Series(data=[1.3, 1.3, 1.3])
|
pandas.Series
|
# Core imports
import re
import json
import base64
from collections import Counter
# Parsing
import pandas as pd
from bs4 import BeautifulSoup
# Google API
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
# NLP
import nltk
import spacy
from gensim.corpora import Dictionary
from spacy.lang.en.stop_words import STOP_WORDS
from gensim.models.ldamulticore import LdaMulticore
# SA
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def preprocess_string(text: str) -> str:
"""Process a string for purpose of tagging.
Args:
text (string): The email parsed email body.
Returns:
text (string): The processed string.
"""
text = text.strip().lower()
text = text.replace(
'[^a-zA-Z\s]', ''
).replace(
'\s+', ' '
)
text = [text]
df = pd.DataFrame(text, columns=['email_body'])
df['email_body'] = df['email_body'].str.strip().str.lower()
df['email_body'].str.match('\d?\d/\d?\d/\d{4}').all()
df['email_body'] = df['email_body'].str.replace(
'[^a-zA-Z\s]', '').str.replace('\s+', ' ')
text = df['email_body']
return(text)
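# Quick illustration (hypothetical input, not part of the original module): the call
# returns a one-element pandas Series holding the cleaned, lower-cased body.
def _example_preprocess():
    return preprocess_string("Hello Team, the Q3 budget review is on Friday at 10am!")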
def tokenize_string(text, nlp):
"""Generate tokens for a given body of text.
Args:
text (string): Processed body of text
nlp: Spacy en_core_web_sm model.
Returns:
tokens (list): List of tokens
"""
tokens = list()
for doc in nlp.pipe(text, batch_size=500):
doc_tokens = []
for token in doc:
if (token.is_stop is False) & (token.is_punct is False):
doc_tokens.append(token.lemma_.lower())
tokens.append(doc_tokens)
return tokens
def generate_tags(tokens: list) -> list:
"""Perform LDA Topic Modelling to aquire tags.
Args:
tokens (list): List of tokens
Returns:
tags_list (list) List of appropriate tags for
given tokens.
"""
id2word = Dictionary(tokens)
corpus = [id2word.doc2bow(d) for d in tokens]
model = LdaMulticore(
corpus=corpus,
id2word=id2word,
random_state=42,
num_topics=10,
passes=2,
workers=1
)
words = [re.findall(r'"([^"]*)"', t[1]) for t in model.print_topics()]
wordcount = Counter(words[0] + words[1] + words[2] + words[3] + words[4])
tags = pd.DataFrame.from_dict(
wordcount, orient='index', columns=['number']
)
tags = tags.drop(tags[tags['number'] <= 1].index)
tags = tags.sort_values(by=['number'], ascending=False).T
tags_list = [word for word in tags.columns]
return tags_list
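# End-to-end sketch (assumes spacy's en_core_web_sm model is installed; the sample
# text is illustrative only):
def _example_tagging_pipeline():
    nlp = spacy.load("en_core_web_sm")
    body = preprocess_string("Notes from the quarterly budget review meeting and planning session.")
    return generate_tags(tokenize_string(body, nlp))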
def generate_sentiment(text):
df =
|
pd.DataFrame(text, columns=['email_body'])
|
pandas.DataFrame
|
#!/usr/bin/env python
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import dash
from dash.dependencies import Input, Output, State
import dash_table
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import os
import tweepy
import logging
import core.coletar_dados as core_cd
import core.processar_dados as core_pd
# load environment variables from .env if it exists
from dotenv import load_dotenv
load_dotenv()
# initialize the Twitter API
auth = tweepy.OAuthHandler(os.environ['API_KEY'], os.environ['API_SECRET_KEY'])
auth.set_access_token(os.environ['ACCESS_TOKEN'],
os.environ['ACCESS_TOKEN_SECRET'])
api = tweepy.API(auth)
# initialize the dataframe
df = pd.DataFrame(columns=['user', 'num_likes',
'num_replies', 'num_retweets', 'score'])
# initialize a Dash application
app = dash.Dash(__name__, title='Interaciômetro | GP2W', external_stylesheets=[dbc.themes.BOOTSTRAP], meta_tags=[{
'http-equiv': 'X-UA-Compatible',
'content': 'IE=edge'
}, {
'name': 'viewport',
'content': 'width=device-width, initial-scale=1.0'
}, {
'name': 'description',
'content': 'Um medidor de interação de perfis no Twitter.'
}, {
'property': 'og:url',
'content': 'https://interaciometro.herokuapp.com/'
}, {
'property': 'og:site_name',
'content': 'Interaciômetro | GP2W'
}, {
'property': 'og:type',
'content': 'website'
}, {
'property': 'og:image',
'content': 'https://interaciometro.herokuapp.com/assets/logo.png'
}, {
'property': 'og:title',
'content': 'Interaciômetro | GP2W'
}, {
'property': 'og:description',
'content': 'Um medidor de interação de perfis no Twitter.'
}, {
'property': 'twitter:title',
'content': 'Interaciômetro | GP2W'
}, {
'property': 'twitter:description',
'content': 'Um medidor de interação de perfis no Twitter.'
}, {
'property': 'twitter:image',
'content': 'https://interaciometro.herokuapp.com/assets/logo.png'
}, {
'property': 'twitter:card',
'content': 'summary_large_image'
},
])
server = app.server # the Flask app
alert = dbc.Alert(["No momento não é possivel realizar novas requisições.",
html.Br(),
"Por favor, tente mais tarde."],
style={
'margin-left':'50px',
'margin-right':'50px'
},
color="danger",
dismissable=True) # use dismissable or duration=5000 for alert to close in x milliseconds
app.layout = html.Div([
html.H2("Interaciômetro"),
html.H5("Digite um usuário do twitter para realizar uma busca"),
html.Div([
"@ ",
dcc.Input(id='user-input', value='', type='text', n_submit=0),
html.Button(id='submit-button-state', type="submit",
n_clicks=0, children='Buscar', style={'margin-left': 10},),
], style={'padding': 10}),
html.Br(),
html.Div(id="the-alert", children=[]),
html.Br(),
dcc.Loading(
id="loading",
type="graph",
children=[
dash_table.DataTable(
id='datatable-row-ids',
columns=[
{'name': 'Usuário', 'id': 'user'},
{'name': 'Likes', 'id': 'num_likes'},
{'name': 'Replies', 'id': 'num_replies'},
{'name': 'Retweets', 'id': 'num_retweets'},
{'name': 'Score', 'id': 'score'},
],
data=df.to_dict('records'),
filter_action="native",
sort_action="native",
sort_mode='multi',
page_action='native',
page_current=0,
page_size=15,
),
html.Div([], style={'margin': 50}),
html.Div(id='datatable-row-ids-container')
]
)
])
@app.callback([Output('the-alert', 'children'),
Output('datatable-row-ids', 'data')],
# n_clicks is only here so the callback fires on the button click
[Input('submit-button-state', 'n_clicks')],
[State('user-input', 'value')])
def update_username(n_clicks, username):
# reset the dataframe
df = pd.DataFrame(columns=['user', 'num_likes'])
retornos = [dash.no_update, dash.no_update]
if username != '':
tweets = core_cd.get_tweets(api=api, username=username)
if(tweets is not None):
likes = core_cd.get_likes(api=api, username=username)
likes_df = core_pd.top_users_likes(likes=likes)
replies_df = core_pd.top_users_replies(tweets=tweets)
# drop the user's own account from the replies
index = replies_df['user'] == username
replies_df = replies_df.drop(replies_df.index[index])
retweets_df = core_pd.top_users_retweets(tweets=tweets)
df = core_pd.score(likes_df, replies_df, retweets_df)
# filter scores greater than 10
index =
|
pd.to_numeric(df['score'])
|
pandas.to_numeric
|
"""
Test AR Model
"""
import datetime as dt
from itertools import product
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pandas as pd
from pandas import Index, Series, date_range, period_range
from pandas.testing import assert_series_equal
import pytest
from statsmodels.datasets import macrodata, sunspots
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.sm_exceptions import SpecificationWarning, ValueWarning
from statsmodels.tools.tools import Bunch
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.deterministic import (
DeterministicProcess,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.tests.results import results_ar
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
def gen_ar_data(nobs):
rs = np.random.RandomState(982739)
idx = pd.date_range(dt.datetime(1900, 1, 1), freq="M", periods=nobs)
return pd.Series(rs.standard_normal(nobs), index=idx), rs
def gen_ols_regressors(ar, seasonal, trend, exog):
nobs = 500
y, rs = gen_ar_data(nobs)
maxlag = ar if isinstance(ar, int) else max(ar)
reg = []
if "c" in trend:
const = pd.Series(np.ones(nobs), index=y.index, name="const")
reg.append(const)
if "t" in trend:
time = np.arange(1, nobs + 1)
time = pd.Series(time, index=y.index, name="time")
reg.append(time)
if isinstance(ar, int) and ar:
lags = np.arange(1, ar + 1)
elif ar == 0:
lags = None
else:
lags = ar
if seasonal:
seasons = np.zeros((500, 12))
for i in range(12):
seasons[i::12, i] = 1
cols = ["s.{0}".format(i) for i in range(12)]
seasons = pd.DataFrame(seasons, columns=cols, index=y.index)
if "c" in trend:
seasons = seasons.iloc[:, 1:]
reg.append(seasons)
if maxlag:
for lag in lags:
reg.append(y.shift(lag))
if exog:
x = rs.standard_normal((nobs, exog))
cols = ["x.{0}".format(i) for i in range(exog)]
x = pd.DataFrame(x, columns=cols, index=y.index)
reg.append(x)
else:
x = None
reg.insert(0, y)
df = pd.concat(reg, axis=1).dropna()
endog = df.iloc[:, 0]
exog = df.iloc[:, 1:]
return y, x, endog, exog
ar = [0, 3, [1, 3], [3]]
seasonal = [True, False]
trend = ["n", "c", "t", "ct"]
exog = [None, 2]
covs = ["nonrobust", "HC0"]
params = list(product(ar, seasonal, trend, exog, covs))
final = []
for param in params:
if param[0] != 0 or param[1] or param[2] != "n" or param[3]:
final.append(param)
params = final
names = ("AR", "Seasonal", "Trend", "Exog", "Cov Type")
ids = [
", ".join([n + ": " + str(p) for n, p in zip(names, param)])
for param in params
]
@pytest.fixture(scope="module", params=params, ids=ids)
def ols_autoreg_result(request):
ar, seasonal, trend, exog, cov_type = request.param
y, x, endog, exog = gen_ols_regressors(ar, seasonal, trend, exog)
ar_mod = AutoReg(y, ar, seasonal=seasonal, trend=trend, exog=x)
ar_res = ar_mod.fit(cov_type=cov_type)
ols = OLS(endog, exog)
ols_res = ols.fit(cov_type=cov_type, use_t=False)
return ar_res, ols_res
attributes = [
"bse",
"cov_params",
"df_model",
"df_resid",
"fittedvalues",
"llf",
"nobs",
"params",
"resid",
"scale",
"tvalues",
"use_t",
]
def fix_ols_attribute(val, attrib, res):
"""
fixes to correct for the df adjustment between OLS and AutoReg with nonrobust cov
"""
nparam = res.k_constant + res.df_model
nobs = nparam + res.df_resid
df_correction = (nobs - nparam) / nobs
if attrib in ("scale",):
return val * df_correction
elif attrib == "df_model":
return val + res.k_constant
elif res.cov_type != "nonrobust":
return val
elif attrib in ("bse", "conf_int"):
return val * np.sqrt(df_correction)
elif attrib in ("cov_params", "scale"):
return val * df_correction
elif attrib in ("f_test",):
return val / df_correction
elif attrib in ("tvalues",):
return val / np.sqrt(df_correction)
return val
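# Worked note on the correction above (sketch): with nobs observations and nparam
# estimated parameters, nonrobust OLS reports scale = RSS / (nobs - nparam) while
# AutoReg uses RSS / nobs, so multiplying the OLS value by (nobs - nparam) / nobs
# aligns the two; standard errors ("bse") scale by the square root of that factor.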
@pytest.mark.parametrize("attribute", attributes)
def test_equiv_ols_autoreg(ols_autoreg_result, attribute):
a, o = ols_autoreg_result
ols_a = getattr(o, attribute)
ar_a = getattr(a, attribute)
if callable(ols_a):
ols_a = ols_a()
ar_a = ar_a()
ols_a = fix_ols_attribute(ols_a, attribute, o)
assert_allclose(ols_a, ar_a)
def test_conf_int_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
a_ci = a.conf_int()
o_ci = o.conf_int()
if o.cov_type == "nonrobust":
spread = o_ci.T - o.params
spread = fix_ols_attribute(spread, "conf_int", o)
o_ci = (spread + o.params).T
assert_allclose(a_ci, o_ci)
def test_f_test_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
r = np.eye(a.params.shape[0])
a_f = a.f_test(r).fvalue
o_f = o.f_test(r).fvalue
o_f = fix_ols_attribute(o_f, "f_test", o)
assert_allclose(a_f, o_f)
@pytest.mark.smoke
def test_other_tests_autoreg(ols_autoreg_result):
a, _ = ols_autoreg_result
r = np.ones_like(a.params)
a.t_test(r)
r = np.eye(a.params.shape[0])
a.wald_test(r)
# TODO: test likelihood for ARX model?
class CheckARMixin(object):
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
def test_bse(self):
bse = np.sqrt(np.diag(self.res1.cov_params()))
# no dof correction for compatibility with Stata
assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
def test_pickle(self):
from io import BytesIO
fh = BytesIO()
# test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0, 0)
res_unpickled = self.res1.__class__.load(fh)
assert type(res_unpickled) is type(self.res1) # noqa: E721
@pytest.mark.smoke
def test_summary(self):
assert isinstance(self.res1.summary().as_text(), str)
@pytest.mark.smoke
def test_pvalues(self):
assert isinstance(self.res1.pvalues, (np.ndarray, pd.Series))
params = product(
[0, 1, 3, [1, 3]],
["n", "c", "t", "ct"],
[True, False],
[0, 2],
[None, 11],
["none", "drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
def gen_data(nobs, nexog, pandas, seed=92874765):
rs = np.random.RandomState(seed)
endog = rs.standard_normal((nobs))
exog = rs.standard_normal((nobs, nexog)) if nexog else None
if pandas:
index = pd.date_range(
dt.datetime(1999, 12, 31), periods=nobs, freq="M"
)
endog = pd.Series(endog, name="endog", index=index)
if nexog:
cols = ["exog.{0}".format(i) for i in range(exog.shape[1])]
exog = pd.DataFrame(exog, columns=cols, index=index)
from collections import namedtuple
DataSet = namedtuple("DataSet", ["endog", "exog"])
return DataSet(endog=endog, exog=exog)
@pytest.fixture(scope="module", params=params, ids=ids)
def ar_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.fixture(scope="module")
def ar2(request):
gen = np.random.RandomState(20210623)
e = gen.standard_normal(52)
y = 10 * np.ones_like(e)
for i in range(2, y.shape[0]):
y[i] = 1 + 0.5 * y[i - 1] + 0.4 * y[i - 2] + e[i]
index = pd.period_range("2000-01-01", periods=e.shape[0] - 2, freq="M")
return pd.Series(y[2:], index=index)
params = product(
[0, 3, [1, 3]],
["c"],
[True, False],
[0],
[None, 11],
["drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
# Only test 1/3 to save time
@pytest.fixture(scope="module", params=params[::3], ids=ids[::3])
def plot_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.mark.matplotlib
@pytest.mark.smoke
def test_autoreg_smoke_plots(plot_data, close_figures):
from matplotlib.figure import Figure
mod = AutoReg(
plot_data.endog,
plot_data.lags,
trend=plot_data.trend,
seasonal=plot_data.seasonal,
exog=plot_data.exog,
hold_back=plot_data.hold_back,
period=plot_data.period,
missing=plot_data.missing,
)
res = mod.fit()
fig = res.plot_diagnostics()
assert isinstance(fig, Figure)
if plot_data.exog is None:
fig = res.plot_predict(end=300)
assert isinstance(fig, Figure)
fig = res.plot_predict(end=300, alpha=None, in_sample=False)
assert isinstance(fig, Figure)
assert isinstance(res.summary(), Summary)
@pytest.mark.smoke
def test_autoreg_predict_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
exog_oos = None
if ar_data.exog is not None:
exog_oos = np.empty((1, ar_data.exog.shape[1]))
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
if ar_data.lags == 0 and ar_data.exog is None:
mod.predict(res.params, 0, 350, exog_oos=exog_oos)
if isinstance(ar_data.endog, pd.Series) and (
not ar_data.seasonal or ar_data.period is not None
):
ar_data.endog.index = list(range(ar_data.endog.shape[0]))
if ar_data.exog is not None:
ar_data.exog.index = list(range(ar_data.endog.shape[0]))
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
period=ar_data.period,
missing=ar_data.missing,
)
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
@pytest.mark.matplotlib
def test_parameterless_autoreg():
data = gen_data(250, 0, False)
mod = AutoReg(data.endog, 0, trend="n", seasonal=False, exog=None)
res = mod.fit()
for attr in dir(res):
if attr.startswith("_"):
continue
# TODO
if attr in (
"predict",
"f_test",
"t_test",
"initialize",
"load",
"remove_data",
"save",
"t_test",
"t_test_pairwise",
"wald_test",
"wald_test_terms",
):
continue
attr = getattr(res, attr)
if callable(attr):
attr()
else:
assert isinstance(attr, object)
def test_predict_errors():
data = gen_data(250, 2, True)
mod = AutoReg(data.endog, 3)
res = mod.fit()
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog=data.exog)
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog_oos=data.exog)
with pytest.raises(ValueError, match="hold_back must be >= lags"):
AutoReg(data.endog, 3, hold_back=1)
with pytest.raises(ValueError, match="freq cannot be inferred"):
AutoReg(data.endog.values, 3, seasonal=True)
mod = AutoReg(data.endog, 3, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match=r"The shape of exog \(200, 2\)"):
mod.predict(res.params, exog=data.exog.iloc[:200])
with pytest.raises(ValueError, match="The number of columns in exog_oos"):
mod.predict(res.params, exog_oos=data.exog.iloc[:, :1])
with pytest.raises(ValueError, match="Prediction must have `end` after"):
mod.predict(res.params, start=200, end=199)
with pytest.raises(ValueError, match="exog_oos must be provided"):
mod.predict(res.params, end=250, exog_oos=None)
mod = AutoReg(data.endog, 0, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match="start and end indicate that 10"):
mod.predict(res.params, end=259, exog_oos=data.exog.iloc[:5])
def test_spec_errors():
data = gen_data(250, 2, True)
with pytest.raises(ValueError, match="lags must be a non-negative scalar"):
AutoReg(data.endog, -1)
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, 1, 1])
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, -2, 3])
@pytest.mark.smoke
def test_dynamic_forecast_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
res.predict(dynamic=True)
if ar_data.exog is None:
res.predict(end=260, dynamic=True)
@pytest.mark.smoke
def test_ar_select_order_smoke():
data = sunspots.load().data["SUNACTIVITY"]
ar_select_order(data, 4, glob=True, trend="n")
ar_select_order(data, 4, glob=False, trend="n")
ar_select_order(data, 4, seasonal=True, period=12)
ar_select_order(data, 4, seasonal=False)
ar_select_order(data, 4, glob=True)
ar_select_order(data, 4, glob=True, seasonal=True, period=12)
class CheckAutoRegMixin(CheckARMixin):
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse_stata, DECIMAL_6)
class TestAutoRegOLSConstant(CheckAutoRegMixin):
"""
Test AutoReg fit by OLS with a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
data.endog.index = list(range(len(data.endog)))
cls.res1 = AutoReg(data.endog, lags=9).fit()
cls.res2 = results_ar.ARResultsOLS(constant=True)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
class TestAutoRegOLSNoConstant(CheckAutoRegMixin):
"""f
Test AR fit by OLS without a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
cls.res1 = AutoReg(np.asarray(data.endog), lags=9, trend="n").fit()
cls.res2 = results_ar.ARResultsOLS(constant=False)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
@pytest.mark.parametrize("lag", list(np.arange(1, 16 + 1)))
def test_autoreg_info_criterion(lag):
data = sunspots.load()
endog = np.asarray(data.endog)
endog_tmp = endog[16 - lag :]
r = AutoReg(endog_tmp, lags=lag).fit()
# See issue #324 for the corrections vs. R
aic = r.aic
hqic = r.hqic
bic = r.bic
res1 = np.array([aic, hqic, bic, r.fpe])
# aic correction to match R
res2 = results_ar.ARLagResults("const").ic.T
comp = res2[lag - 1, :].copy()
k = 2 + lag
pen = np.array([2, 2 * np.log(np.log(r.nobs)), np.log(r.nobs)])
comp[:3] = -2 * r.llf + pen * k
assert_almost_equal(res1, comp, DECIMAL_6)
r2 = AutoReg(endog, lags=lag, hold_back=16).fit()
assert_allclose(r.aic, r2.aic)
assert_allclose(r.bic, r2.bic)
assert_allclose(r.hqic, r2.hqic)
assert_allclose(r.fpe, r2.fpe)
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_named_series(reset_randomstate, old_names):
warning = FutureWarning if old_names else None
dates = period_range(start="2011-1", periods=72, freq="M")
y = Series(np.random.randn(72), name="foobar", index=dates)
with pytest.warns(warning):
results = AutoReg(y, lags=2, old_names=old_names).fit()
if old_names:
idx = Index(["intercept", "foobar.L1", "foobar.L2"])
else:
idx = Index(["const", "foobar.L1", "foobar.L2"])
assert results.params.index.equals(idx)
@pytest.mark.smoke
def test_autoreg_series():
# GH#773
dta = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(dta), freq="Q")
dta.index = dates
ar = AutoReg(dta, lags=15).fit()
ar.bse
def test_ar_order_select():
# GH#2118
np.random.seed(12345)
y = arma_generate_sample([1, -0.75, 0.3], [1], 100)
ts = Series(
y,
index=date_range(start=dt.datetime(1990, 1, 1), periods=100, freq="M"),
)
res = ar_select_order(ts, maxlag=12, ic="aic")
assert tuple(res.ar_lags) == (1, 2)
assert isinstance(res.aic, dict)
assert isinstance(res.bic, dict)
assert isinstance(res.hqic, dict)
assert isinstance(res.model, AutoReg)
assert not res.seasonal
assert res.trend == "c"
assert res.period is None
def test_autoreg_constant_column_trend():
sample = np.array(
[
0.46341460943222046,
0.46341460943222046,
0.39024388790130615,
0.4146341383457184,
0.4146341383457184,
0.4146341383457184,
0.3414634168148041,
0.4390243887901306,
0.46341460943222046,
0.4390243887901306,
]
)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7, trend="n")
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_summary_corner(old_names):
data = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(data), freq="Q")
data.index = dates
warning = FutureWarning if old_names else None
with pytest.warns(warning):
res = AutoReg(data, lags=4, old_names=old_names).fit()
summ = res.summary().as_text()
assert "AutoReg(4)" in summ
assert "cpi.L4" in summ
assert "03-31-1960" in summ
with pytest.warns(warning):
res = AutoReg(data, lags=0, old_names=old_names).fit()
summ = res.summary().as_text()
if old_names:
assert "intercept" in summ
else:
assert "const" in summ
assert "AutoReg(0)" in summ
@pytest.mark.smoke
def test_autoreg_score():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), 3)
res = ar.fit()
score = ar.score(res.params)
assert isinstance(score, np.ndarray)
assert score.shape == (4,)
assert ar.information(res.params).shape == (4, 4)
assert_allclose(-ar.hessian(res.params), ar.information(res.params))
def test_autoreg_roots():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), lags=1)
res = ar.fit()
assert_almost_equal(res.roots, np.array([1.0 / res.params[-1]]))
def test_equiv_dynamic(reset_randomstate):
e = np.random.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
mod = AutoReg(y, 1)
res = mod.fit()
pred0 = res.predict(500, 800, dynamic=0)
pred1 = res.predict(500, 800, dynamic=True)
idx = pd.date_range(dt.datetime(2000, 1, 30), periods=1001, freq="M")
y = pd.Series(y, index=idx)
mod = AutoReg(y, 1)
res = mod.fit()
pred2 = res.predict(idx[500], idx[800], dynamic=idx[500])
pred3 = res.predict(idx[500], idx[800], dynamic=0)
pred4 = res.predict(idx[500], idx[800], dynamic=True)
assert_allclose(pred0, pred1)
assert_allclose(pred0, pred2)
assert_allclose(pred0, pred3)
assert_allclose(pred0, pred4)
def test_dynamic_against_sarimax():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
smod = SARIMAX(y, order=(1, 0, 0), trend="c")
sres = smod.fit(disp=False)
mod = AutoReg(y, 1)
spred = sres.predict(900, 1100)
pred = mod.predict(sres.params[:2], 900, 1100)
assert_allclose(spred, pred)
spred = sres.predict(900, 1100, dynamic=True)
pred = mod.predict(sres.params[:2], 900, 1100, dynamic=True)
assert_allclose(spred, pred)
spred = sres.predict(900, 1100, dynamic=50)
pred = mod.predict(sres.params[:2], 900, 1100, dynamic=50)
assert_allclose(spred, pred)
def test_predict_seasonal():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
effects = 10 * np.cos(np.arange(12) / 11 * 2 * np.pi)
for i in range(1, 1001):
y[i] = 10 + 0.9 * y[i - 1] + e[i] + effects[i % 12]
ys = pd.Series(
y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq="M")
)
mod = AutoReg(ys, 1, seasonal=True)
res = mod.fit()
c = res.params.iloc[0]
seasons = np.zeros(12)
seasons[1:] = res.params.iloc[1:-1]
ar = res.params.iloc[-1]
pred = res.predict(900, 1100, True)
direct = np.zeros(201)
direct[0] = y[899] * ar + c + seasons[900 % 12]
for i in range(1, 201):
direct[i] = direct[i - 1] * ar + c + seasons[(900 + i) % 12]
direct = pd.Series(
direct, index=pd.date_range(ys.index[900], periods=201, freq="M")
)
|
assert_series_equal(pred, direct)
|
pandas.testing.assert_series_equal
|
# Module for building and querying a dataframe from a Moodle report
# Author: <NAME>, Universidad de Cienfuegos
###
import pandas as pd
#from . import my_globals
from django.conf import settings
import os
#from glob import iglob
import IP2Location
from . import moodle_backup, cluster
# GLOBAL VARIABLES ·········
# Active participation events
ACTIVE_PART = [
# Forum
"Algún contenido ha sido publicado.", "Tema creado", "Mensaje creado", "Mensaje actualizado",
# Assignment
"Se ha enviado una entrega", "Se ha entregado una extensión",
# Wiki
"Comentario creado", "Página wiki creada", "Página de la wiki actualizada",
# Workshop
"Se ha subido una entrega", "Entrega creada", "Entrega actualizada",
# Quiz
"Intento enviado",
# Glossary and Comments
"Comentario creado",
# Glossary
"La entrada ha sido creada", "La entrada ha sido actualizada"
# Chat
"Mensaje enviado",
# Survey module
"Respuesta enviada"]
ACTIVE_PART_FORO = [
"Algún contenido ha sido publicado.", "Tema creado", "Mensaje creado", "Mensaje actualizado"]
COMPLETED_ASSIGMENT = "Se ha enviado una entrega"
GLOSARY_PART = [
# Glossary
"Comentario creado", "La entrada ha sido creada", "La entrada ha sido actualizada"]
CUESTIONARIO_PART = ["Intento enviado"]
# ·········
# DATAFRAME CREATION ·········
def data_upload(file):
df = pd.read_csv(file)
df = change_columns_name(df)
df = del_user_by_name(df, ["-"])
df = add_mont_day_hour_columns(df)
df = add_weekday_columns(df)
df = add_ID_user_column(df)
df = add_ID_resource_column(df)
#df = create_dynamic_session_id(df)
#df = moodle_backup.course_structure(df, backup_file)
return df
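# Usage sketch (hedged, not part of the original module): assuming a Moodle log export
# readable by pandas.read_csv, e.g. a hypothetical 'course_log.csv', the enriched
# dataframe could be built with:
#   df = data_upload('course_log.csv')
# The helpers called above (change_columns_name, add_ID_user_column, ...) are assumed
# to be defined further down in this module.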
def df_from_multiple_file(files):
df = pd.DataFrame()
for f in files:
dfaux = data_upload(f)
#pos1 = f.find("\\")
pos2 = str(f.name).find(".csv")
nombre_curso = str(f.name)[0:pos2]
dfaux["Curso"] = nombre_curso
df =
|
pd.concat([df,dfaux], ignore_index=True)
|
pandas.concat
|
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_where(self):
expected = self.index
result = self.index.where(self.index.notna())
tm.assert_index_equal(result, expected)
idx = IntervalIndex.from_breaks([1, 2])
result = idx.where([True, False])
expected = IntervalIndex.from_intervals(
[Interval(1.0, 2.0, closed='right'), np.nan])
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
pass
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
Interval(2, 3, closed='left'))
def test_take(self):
actual = self.index.take([0, 1])
assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
def test_monotonic_and_unique(self):
assert self.index.is_monotonic
assert self.index.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
assert idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
assert not idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
assert not idx.is_unique
assert idx.is_monotonic
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed='right')
assert i[0] == Interval(0.0, 1.0)
assert i[1] == Interval(1.0, 2.0)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right')
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right')
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed='right')
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_breaks(breaks, closed='both')
assert index.slice_locs(1, 1) == (0, 2)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def test_contains_method(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self):
expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)])
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)])
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self):
other = IntervalIndex.from_arrays([2], [3])
expected = IntervalIndex.from_arrays(range(3), range(1, 4))
actual = self.index.union(other)
assert expected.equals(actual)
actual = other.union(self.index)
assert expected.equals(actual)
tm.assert_index_equal(self.index.union(self.index), self.index)
tm.assert_index_equal(self.index.union(self.index[:1]),
self.index)
def test_intersection(self):
other = IntervalIndex.from_breaks([1, 2, 3])
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.intersection(other)
assert expected.equals(actual)
tm.assert_index_equal(self.index.intersection(self.index),
self.index)
def test_difference(self):
tm.assert_index_equal(self.index.difference(self.index[:1]),
self.index[1:])
def test_symmetric_difference(self):
result = self.index[:1].symmetric_difference(self.index[1:])
expected = self.index
tm.assert_index_equal(result, expected)
def test_set_operation_errors(self):
pytest.raises(ValueError, self.index.union, self.index.left)
other = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
pytest.raises(ValueError, self.index.union, other)
def test_isin(self):
actual = self.index.isin(self.index)
tm.assert_numpy_array_equal(np.array([True, True]), actual)
actual = self.index.isin(self.index[:1])
tm.assert_numpy_array_equal(np.array([True, False]), actual)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index ==
|
IntervalIndex.from_breaks([0, 1, 2], 'left')
|
pandas.IntervalIndex.from_breaks
|
import os
import random
import yaml
from collections import Counter
from nltk.corpus import movie_reviews
from nltk.corpus import senseval
import pandas as pd
import json
class datasetGenerator:
"""
Creates a base dataset from senseval in NLTK.
It generates the data.json dataset by instantiating the class,
or by retrieving data from https://github.com/sebischair/NLU-Evaluation-Corpora
(see also chatito: https://rodrigopivi.github.io/Chatito/)
"""
# TODO: consolidate dataflow to pandas dataframe and csv or yaml
def __init__(self, dataset="", size=200, filename="data.json", randomSeed=42):
if dataset == "":
if "json.data" in os.walk(os.path.join("..", "data", filename)):
return
else:
dataset = "senseval"
if dataset == "senseval":
self.instances = senseval.instances("hard.pos")
self.getDataNLTK()
self.sampleData(size, randomSeed)
self.saveData()
if (
dataset == "AskUbuntuCorpus"
or dataset == "ChatbotCorpus"
or dataset == "WebApplicationsCorpus"
):
self.getDataJson(dataset)
self.sampleData(size, randomSeed)
self.saveData()
if dataset not in [
"",
"senseval",
"AskUbuntuCorpus",
"ChatbotCorpus",
"WebApplicationsCorpus",
]:
raise Exception("not implemented other dataset than senseval")
def getDataNLTK(self):
self.labels = []
self.sentences = []
for instance in self.instances:
try:
self.sentences.append(
" ".join([i for i, _ in instance.context if i.isalpha()])
)
self.labels.append(instance.senses[0])
except:
pass
def getDataJson(self, filename):
with open(
os.path.join("..", "data", filename + ".json"), encoding="utf8"
) as datafile:
data = json.load(datafile)
df = pd.DataFrame(data["sentences"])
df = df.loc[df["intent"] != "None"]
df = self.changeToCompliantLabel(df)
self.labels = df.intent.tolist()
self.sentences = df.text.tolist()
def changeToCompliantLabel(self, df):
def getCompliantLabel(uniqueLabel):
return "".join([c for c in uniqueLabel if c.isalpha()])
self.uniqueLabels = df.intent.unique()
for uL in self.uniqueLabels:
df["intent"].replace(uL, getCompliantLabel(uL), inplace=True)
return df
def sampleData(self, size=200, randomSeed=42):
random.seed(randomSeed)
self.sampleList = random.sample(
range(len(self.sentences)), min(size, len(self.sentences))
)
self.sentences = [self.sentences[i] for i in self.sampleList]
self.labels = [self.labels[i] for i in self.sampleList]
self.uniqueLabels = dict(Counter(self.labels))
def saveData(self, filename="data.csv"):
df =
|
pd.DataFrame(data={"sentences": self.sentences, "labels": self.labels})
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# Author:
# <NAME>
# Emotional Sentiment on Twitter
# A coronavirus vaccine online firestorm
# In this python script you will find examples of some of the most common
# NLP (Natural Language Processing) techniques used to uncover patterns of
# sentiment and emotion on social media microblogging platforms like Twitter.
# It is organized as follows:
# - Step 1: Exploratory analysis
# - Step 2: Text processing
# - Step 3: Sentiment analysis
# - Step 4: Word frequency
# - Step 5: LDA topics extraction
# - Step 6: Emotion analysis
#
# ## Step 1: EXPLORATORY ANALYSIS
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from datetime import date
import re # for regular expressions
import string
# Importing the data
tweets = pd.read_csv('input/tweets.csv')
# getting the date column ready for datetime operations
tweets['datetime']= pd.to_datetime(tweets['datetime'])
# A plot of the tweets with the word "CureVac" over the past 6 years.
fig = plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=tweets.set_index("datetime").groupby(pd.Grouper(freq='Y')).count())
plt.title('Tweets with "CureVac" from 2014 to 2020', fontsize=20)
plt.xlabel('Years', fontsize=15)
plt.ylabel('Tweets', fontsize=15)
fig.savefig("images/All_Tweets_2014-2020.png")
# creating a column to filter the online storm period (from 15 to 18 March)
def make_onlinestorm_field():
for i, row in tweets.iterrows():
if pd.to_datetime(tweets.at[i, 'datetime']) > pd.Timestamp(date(2020,3,15)):
tweets.at[i, 'onlinestorm'] = True
else:
tweets.at[i, 'onlinestorm'] = False
make_onlinestorm_field()
# counting tweets during the three days online storm
print('In three days, tweets went over {}, all around the world.'.format(tweets[tweets['onlinestorm']]['onlinestorm'].count()))
tweets[tweets['onlinestorm']]
# Let's now have a look at the distribution of the tweets, by the hour, during the online storm.
fig = plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=tweets[tweets['onlinestorm'] == True].set_index("datetime").groupby(pd.Grouper(freq='H')).onlinestorm.count())
plt.title('Tweets per hour from 15 to 18 March 2020', fontsize=20)
plt.xlabel('Time (hours)', fontsize=15)
plt.ylabel('No. Tweets', fontsize=15)
fig.savefig("images/All_Tweets_Onlinestorm.png")
# It is time to have a first look at the content of the tweets and do some descriptive statistics.
# For now, I will focus only on features like hashtags, mentions, urls, capital words and words in general.
# A function to count tweets based on regular expressions
def count_tweets(reg_expression, tweet):
tweets_list = re.findall(reg_expression, tweet)
return len(tweets_list)
# Creating a dictionary to hold these counts
content_count = {
'words' : tweets['text'].apply(lambda x: count_tweets(r'\w+', x)),
'mentions' : tweets['text'].apply(lambda x: count_tweets(r'@\w+', x)),
'hashtags' : tweets['text'].apply(lambda x: count_tweets(r'#\w+', x)),
'urls' : tweets['text'].apply(lambda x: count_tweets(r'http.?://[^\s]+[\s]?', x)),
}
df = pd.concat([tweets, pd.DataFrame(content_count)], axis=1)
# Tweets descriptive statistics
# Display descriptive statistics for words, mentions,
# hashtags and urls
for key in content_count.keys():
print()
print('Descriptive statistics for {}'.format(key))
print(df.groupby('onlinestorm')[key].describe())
# Now plot them
for key in content_count.keys():
bins = np.arange(df[key].min(), df[key].max() + 1)
g = sns.FacetGrid(df, col='onlinestorm', height=5, hue='onlinestorm', palette="RdYlGn")
g = g.map(sns.distplot, key, kde=False, norm_hist=True, bins=bins)
plt.savefig('images/Descriptive_stats_for_' + key + '.png')
# Step 2: TEXT PROCESSING
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk import pos_tag
# I am adding my own stopwords list to the NLTK list.
# This way we can drop words that are irrelevant for text processing
MY_STOPWORDS = ['curevac','vaccine','german','mrna','biotech','cancer', 'lilly','eli','ag','etherna_immuno', 'translatebio', 'mooreorless62','boehringer', 'ingelheim','biopharmaceutical', 'company']
STOPLIST = set(stopwords.words('english') + list(MY_STOPWORDS))
SYMBOLS = " ".join(string.punctuation).split(" ") + ["-", "...", "”", "``", ",", ".", ":", "''","#","@"]
# The NLTK lemmatizer and stemmer classes
lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer('english')
# read english selected tweets, no duplicates
tweets = pd.read_csv('input/tweets_en.csv')
# I use the POS tagging from NLTK to retain only adjectives, verbs, adverbs
# and nouns as a base for for lemmatization.
def get_lemmas(tweet):
# A dictionary to help convert Treebank tags to WordNet
treebank2wordnet = {'NN':'n', 'JJ':'a', 'VB':'v', 'RB':'r'}
postag = ''
lemmas_list = []
for word, tag in pos_tag(word_tokenize(tweet)):
if tag.startswith("JJ") or tag.startswith("RB") or tag.startswith("VB") or tag.startswith("NN"):
try:
postag = treebank2wordnet[tag[:2]]
except:
postag = 'n'
lemmas_list.append(lemmatizer.lemmatize(word.lower(), postag))
return lemmas_list
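# Illustration (hedged, illustrative only): for a tweet like "vaccines are being tested",
# pos_tag typically tags 'vaccines' as NNS and 'tested' as VBN, which map to WordNet
# 'n' and 'v', so the returned lemmas include 'vaccine' and 'test'; the exact output
# depends on the installed NLTK tagger and WordNet versions.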
# We will now pre-process the tweets, following a pipeline of tokenization,
# filtering, case normalization and lemma extraction.
# This is the function to clean and filter the tokens in each tweet
def clean_tweet(tokens):
filtered = []
for token in tokens:
if re.search('[a-zA-Z]', token):
if token not in STOPLIST:
if token[0] not in SYMBOLS:
if not token.startswith('http'):
if '/' not in token:
if '-' not in token:
filtered.append(token)
return filtered
# Prior to lemmatization, I apply POS (part-of-speech) tagging to make sure that only the
# adjectives, verbs, adverbs and nouns are retained.
# Starts the lemmatization process
def get_lemmatized(tweet):
all_tokens_string = ''
filtered = []
tokens = []
# lemmatize
tokens = [token for token in get_lemmas(tweet)]
# filter
filtered = clean_tweet(tokens)
# join everything into a single string
all_tokens_string = ' '.join(filtered)
return all_tokens_string
# get the lemmatized tweets and put the result in an "edited" text column
# for future use in this script
edited = ''
for i, row in tweets.iterrows():
edited = get_lemmatized(tweets.loc[i]['text'])
if len(edited) > 0:
tweets.at[i,'edited'] = edited
else:
tweets.at[i,'edited'] = None
# After lemmatization, some tweets may end up with the same words
# Let's make sure that we have no duplicates
tweets.drop_duplicates(subset=['edited'], inplace=True)
tweets.dropna(inplace=True)
# With these text processing steps, and the removal of duplicates,
# the final sample counts 5,508 English-language tweets,
# with an average of 30 words (SD 12.5, ranging from 4 to 61 words).
# Using apply/lambda to create a new column with the number of words in each tweet
tweets['word_count'] = tweets.apply(lambda x: len(x['text'].split()),axis=1)
t = pd.DataFrame(tweets['word_count'].describe()).T
tweets.head()
# Step 3: SENTIMENT ANALYSIS
# Let us import the VADER analyser.
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# For the purpose of the time series analysis, we must make sure that the tweets are all correctly sorted.
tweets['datetime']=pd.to_datetime(tweets['datetime'])
tweets.sort_values('datetime', inplace=True, ascending=True)
tweets = tweets.reset_index(drop=True)
# Creating a column to "filter" the online storm period.
make_onlinestorm_field()
# To avoid repetitions in our code, here are some plotting functions
# that will be called often ...
def plot_sentiment_period(df, info):
# Using the mean values of sentiment for each period
df1 = df.groupby(df['datetime'].dt.to_period(info['period'])).mean()
df1.reset_index(inplace=True)
df1['datetime'] = pd.PeriodIndex(df1['datetime']).to_timestamp()
plot_df = pd.DataFrame(df1, df1.index, info['cols'])
plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=plot_df, linewidth = 3, dashes = False)
plt.legend(loc='best', fontsize=15)
plt.title(info['title'], fontsize=20)
plt.xlabel(info['xlab'], fontsize=15)
plt.ylabel(info['ylab'], fontsize=15)
plt.tight_layout()
plt.savefig('images/' + info['fname'])
return
def plot_fractions(props, title, fname):
plt1 = props.plot(kind='bar', stacked=False, figsize=(16,5), colormap='Spectral')
plt.legend(bbox_to_anchor=(1.005, 1), loc=2, borderaxespad=0.)
plt.xlabel('Online storm', fontweight='bold', fontsize=18)
plt.xticks(rotation=0,fontsize=14)
#plt.ylim(0, 0.5)
plt.ylabel('Fraction of Tweets', fontweight='bold', fontsize=18)
plt1.set_title(label=title, fontweight='bold', size=20)
plt.tight_layout()
plt.savefig('images/' + fname + '.png')
return
def plot_frequency_chart(info):
fig, ax = plt.subplots(figsize=(14, 8))
sns.set_context("notebook", font_scale=1)
ax = sns.barplot(x=info['x'], y=info['y'], data=info['data'], palette=(info['pal']))
ax.set_title(label=info['title'], fontweight='bold', size=18)
plt.ylabel(info['ylab'], fontsize=16)
plt.xlabel(info['xlab'], fontsize=16)
plt.xticks(rotation=info['angle'],fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig('images/' + info['fname'])
return
# Calling VADER
analyzer = SentimentIntensityAnalyzer()
# Get VADER Compound value for sentiment intensity
tweets['sentiment_intensity'] = [analyzer.polarity_scores(v)['compound'] for v in tweets['edited']]
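# Note (for reference): analyzer.polarity_scores(text) returns a dict with keys
# 'neg', 'neu', 'pos' and 'compound'; the 'compound' score is a normalized value in
# [-1, 1], which is why the thresholds in get_sentiment below are applied to it.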
# This function returns the sentiment category
def get_sentiment(intensity):
if intensity >= 0.05:
return 'Positive'
elif (intensity >= -0.05) and (intensity < 0.05):
return 'Neutral'
else:
return 'Negative'
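# Worked example: get_sentiment(0.6) -> 'Positive', get_sentiment(0.0) -> 'Neutral',
# get_sentiment(-0.2) -> 'Negative'; the +/-0.05 cut-offs used above follow the usual
# VADER guidance.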
# Using pandas apply/lambda to speed up the process
tweets['sentiment'] = tweets.apply(lambda x: get_sentiment(x['sentiment_intensity']),axis=1)
# The next plot gives us a clear image of the “explosion” of contradictory sentiments in this period:
df=tweets.loc[:,['datetime','sentiment_intensity']]
# filter for these dates
df.set_index('datetime',inplace=True)
df=df[(df.index>='2020-03-12') & (df.index<'2020-03-18')]
df.plot(figsize=(12,6));
plt.ylabel('Compoud score', fontsize=15)
plt.xlabel('Tweets', fontsize=15)
plt.legend().set_visible(False)
plt.title('Sentiment on tweets with CureVac (12 March to 18 March)', fontsize=20)
plt.tight_layout()
sns.despine(top=True)
plt.savefig('images/Sentiment_during_onlinestorm.png')
plt.show()
# And this one will shows us a comparison of the sentiments before and during the online strom.
# Values are normalized to take into account the number of tweets in each
# of the two different periods
props = tweets.groupby('onlinestorm')['sentiment'].value_counts(normalize=True).unstack()
plot_fractions(props,'Percentage of sentiments before and during the online storm',
'Fraction_sentiments_before_and_during_onlinestorm')
# Step 4: Word frequency
# We need these imports for the wordcloud representation:
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from matplotlib.colors import makeMappingArray
from palettable.colorbrewer.diverging import Spectral_4
from collections import Counter # Counts the most common items in a list
def display_wordcloud(tokens, title, fname):
tokens_upper = [token.upper() for token in tokens]
cloud_mask = np.array(Image.open("images/cloud_mask.png"))
wordcloud = WordCloud(max_font_size=100,
max_words=50, width=2500,
height=1750,mask=cloud_mask,
background_color="white").generate(" ".join(tokens_upper))
plt.figure()
fig, ax = plt.subplots(figsize=(14, 8))
plt.title(title, fontsize=20)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.savefig('images/'+ fname + '.png')
plt.show()
return
def join_edited_string(edited_tweets):
edited_string = ''
for row in edited_tweets:
edited_string = edited_string + ' ' + row
return edited_string
def get_trigrams(trigrams, top_grams):
grams_str = []
data = []
gram_counter = Counter(trigrams)
for grams in gram_counter.most_common(top_grams):
gram = ''
grams_str = grams[0]
grams_str_count = []
for n in range(0,3):
gram = gram + grams_str[n] + ' '
grams_str_count.append(gram)
grams_str_count.append(grams[1])
data.append(grams_str_count)
print(grams_str_count)
df = pd.DataFrame(data, columns = ['Grams', 'Count'])
return df
# Let’s have a look at the 20 most frequent words in tweets before the online storm.
# Filtering the tweets of the 6 years before the online storm
df = tweets[tweets['onlinestorm'] == False]
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# get trigrams
trigrams = nltk.trigrams(tokens)
# plot word frequency during online storm
word_counter = Counter(tokens)
df_counter = pd.DataFrame(word_counter.most_common(20), columns = ['word', 'freq'])
info = {'data': df_counter, 'x': 'freq', 'y': 'word',
'xlab': 'Count', 'ylab': 'Words', 'pal':'viridis',
'title': 'Most frequent words before online storm',
'fname':'word_frequency_before_onlinestorm.png',
'angle': 90}
plot_frequency_chart(info)
# plot trigram frequency
df_trigrams = get_trigrams(trigrams, 10)
info = {'data': df_trigrams, 'x': 'Grams', 'y': 'Count',
'xlab': 'Trigrams', 'ylab': 'Count', 'pal':'viridis',
'title': 'Most frequent trigrams before online storm',
'fname':'trigrams_frequency_before_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
# And the wordcloud ...
display_wordcloud(tokens, 'Wordcloud of most frequent words before online storm',
'WordCloud_before_onlinestorm')
# Filtering the tweets of the 3 days of the online storm
df =tweets[tweets['onlinestorm']]
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# get trigrams
trigrams = nltk.trigrams(tokens)
# plot word frequency during online storm
word_counter = Counter(tokens)
df_counter = pd.DataFrame(word_counter.most_common(20), columns = ['word', 'freq'])
info = {'data': df_counter, 'x': 'freq', 'y': 'word',
'xlab': 'Count', 'ylab': 'Words', 'pal':'inferno',
'title': 'Most frequent words during online storm',
'fname':'word_frequency_during_onlinestorm.png',
'angle': 90}
plot_frequency_chart(info)
# plot trigrams frequency
df_trigrams = get_trigrams(trigrams, 10)
info = {'data': df_trigrams, 'x': 'Grams', 'y': 'Count',
'xlab': 'Trigrams', 'ylab': 'Count', 'pal':'inferno',
'title': 'Most frequent trigrams during online storm',
'fname':'trigrams_frequency_during_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
display_wordcloud(tokens, 'Wordcloud of most frequent words during online storm',
'WordCloud_during_onlinestorm')
# Step 5: LDA topics extraction
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer
# I am using here Susan Li's functions to get the top words from a topic:
def get_keys(topic_matrix):
'''
returns an integer list of predicted topic
categories for a given topic matrix
'''
keys = topic_matrix.argmax(axis=1).tolist()
return keys
def keys_to_counts(keys):
'''
returns a tuple of topic categories and their
accompanying magnitudes for a given list of keys
'''
count_pairs = Counter(keys).items()
categories = [pair[0] for pair in count_pairs]
counts = [pair[1] for pair in count_pairs]
return (categories, counts)
def get_top_n_words(n, n_topics, keys, document_term_matrix, tfidf_vectorizer):
'''
returns a list of n_topic strings, where each string contains the n most common
words in a predicted category, in order
'''
top_word_indices = []
for topic in range(n_topics):
temp_vector_sum = 0
for i in range(len(keys)):
if keys[i] == topic:
temp_vector_sum += document_term_matrix[i]
temp_vector_sum = temp_vector_sum.toarray()
top_n_word_indices = np.flip(np.argsort(temp_vector_sum)[0][-n:],0)
top_word_indices.append(top_n_word_indices)
top_words = []
for topic in top_word_indices:
topic_words = []
for index in topic:
temp_word_vector = np.zeros((1,document_term_matrix.shape[1]))
temp_word_vector[:, index] = 1
the_word = tfidf_vectorizer.inverse_transform(temp_word_vector)[0][0]
try:
topic_words.append(the_word.encode('ascii').decode('utf-8'))
except:
pass
top_words.append(", ".join(topic_words))
return top_words
# And here is a function for topics extraction using LDA, in which I produce a dataframe
# with the topics and their top words to facilitate the plotting that follows.
# LDA topics
def get_topics(edited, n_topics, n_words):
eds = edited.values
vec = TfidfVectorizer(use_idf=True, smooth_idf=True)
document_term_matrix = vec.fit_transform(eds)
model = LatentDirichletAllocation(n_components=n_topics)
topic_matrix = model.fit_transform(document_term_matrix)
keys = get_keys(topic_matrix)
categories, counts = keys_to_counts(keys)
top_n_words = get_top_n_words(n_words, n_topics, keys, document_term_matrix, vec)
topics = ['Topic {}: \n'.format(i + 1) + top_n_words[i] for i in categories]
data=[]
for i, topic in enumerate(topics):
tmp = []
tmp.append(topic)
tmp.append(counts[i])
data.append(tmp)
df_topics = pd.DataFrame(data, columns = ['Topics', 'Count'])
return df_topics
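# Note (hedged summary of the function above): get_topics(df['edited'], 5, 5), as used
# below, fits a 5-topic LDA on a TF-IDF matrix of the edited tweets and returns a
# DataFrame with a 'Topics' column (topic label plus its top words) and a 'Count'
# column (number of tweets whose dominant topic is that category).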
# Topics before the online storm
# Filtering the tweets of the 6 years before the online storm
df = tweets[tweets['onlinestorm'] == False]
# LDA topics
df_topics = get_topics(df['edited'], 5, 5)
info = {'data': df_topics, 'x': 'Topics', 'y': 'Count',
'xlab': 'Topics', 'ylab': 'Count', 'pal':'viridis',
'title': 'LDA Topics before Online Storm',
'fname':'LDA_Topics_before_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
# Topics during the online storm
# Filtering the tweets of the 3 days of the online storm
df =tweets[tweets['onlinestorm']]
# LDA topics
df_topics = get_topics(df['edited'], 5, 5)
info = {'data': df_topics, 'x': 'Topics', 'y': 'Count',
'xlab': 'Topics', 'ylab': 'Count', 'pal':'inferno',
'title': 'Main Topics during Online Storm',
'fname':'LDA_Topics_during_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
# Step 6: Emotion analysis
import termcolor
import sys
from termcolor import colored, cprint
plt.style.use('fivethirtyeight')
# Importing the data from the NCR lexicon
ncr = pd.read_csv('input/NCR-lexicon.csv', sep =';')
# Let's create a list of the emotions
emotions = ['Anger', 'Anticipation','Disgust','Fear', 'Joy','Sadness', 'Surprise', 'Trust']
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# We build now two dictionaries with indexes and unique words, for future reference
unique_words = set(tokens)
word_to_ind = dict((word, i) for i, word in enumerate(unique_words))
ind_to_word = dict((i, word) for i, word in enumerate(unique_words))
def plot_emotions_period(df, cols, title, fname, period = 'h' ):
df1 = df.groupby(df['datetime'].dt.to_period(period)).mean()
df1.reset_index(inplace=True)
df1['datetime'] =
|
pd.PeriodIndex(df1['datetime'])
|
pandas.PeriodIndex
|
from numpy import loadtxt
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
n = 25
particle = ['NO2', 'O3', 'NO', 'CO', 'PM1', 'PM2.5', 'PM10']
def actual_vs_predicteddl():
select_model = st.sidebar.radio("Choose Model ?", ('LSTM','GRU','CNN'))
select_particle = st.sidebar.radio(
"Choose Particle ?", ('NO2', 'O3', 'NO', 'CO', 'PM1', 'PM2.5', 'PM10'))
if select_particle == 'NO2':
loc = 0
if select_particle == 'O3':
loc = 1
if select_particle == 'NO':
loc = 2
if select_particle == 'CO':
loc = 3
if select_particle == 'PM1':
loc = 4
if select_particle == 'PM2.5':
loc = 5
if select_particle == 'PM10':
loc = 6
if select_model == 'LSTM':
get_lstm(loc)
if select_model == 'CNN':
get_cnn(loc)
if select_model == 'GRU':
get_gru(loc)
def get_lstm(loc):
lstm_y_test = loadtxt('Models_OPC/lstm_y_test.csv', delimiter=',')
lstm_y_test_pred = loadtxt(
'Models_OPC/lstm_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(lstm_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'X'])
l2 = list()
l2.append(['Y_Predicted']*n)
l2.append(np.round(lstm_y_test_pred[:n, loc], 9))
l2.append(list(range(1, n+1)))
temp2 = np.array(l2).transpose()
x2 = list(range(n+1, 2*n+1))
chart_data2 = pd.DataFrame(temp2, x2, columns=['Data', particle[loc], 'X'])
frames = [chart_data1, chart_data2]
results = pd.concat(frames)
chart = alt.Chart(results.reset_index()).mark_line().encode(
x='X',
y=particle[loc],
color='Data',
strokeDash='Data',
).properties(
title='Plot of Actual vs Predicted for LSTM model for ' +
particle[loc]+' particle'
)
st.altair_chart(chart, use_container_width=True)
def get_gru(loc):
lstm_y_test = loadtxt('Models_OPC/gru_y_test.csv', delimiter=',')
lstm_y_test_pred = loadtxt(
'Models_OPC/gru_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(lstm_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'X'])
l2 = list()
l2.append(['Y_Predicted']*n)
l2.append(np.round(lstm_y_test_pred[:n, loc], 9))
l2.append(list(range(1, n+1)))
temp2 = np.array(l2).transpose()
x2 = list(range(n+1, 2*n+1))
chart_data2 = pd.DataFrame(temp2, x2, columns=['Data', particle[loc], 'X'])
frames = [chart_data1, chart_data2]
results = pd.concat(frames)
chart = alt.Chart(results.reset_index()).mark_line().encode(
x='X',
y=particle[loc],
color='Data',
strokeDash='Data',
).properties(
title='Plot of Actual vs Predicted for GRU model for ' +
particle[loc]+' particle'
)
st.altair_chart(chart, use_container_width=True)
def get_cnn(loc):
lstm_y_test = loadtxt('Models_OPC/cnn_y_test.csv', delimiter=',')
lstm_y_test_pred = loadtxt(
'Models_OPC/cnn_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(lstm_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 =
|
pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'X'])
|
pandas.DataFrame
|
import sys
import time
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from glob import glob
from sklearn.neural_network import MLPClassifier
def load_data(file_name):
print('FILE EXIST')
featuresDF = pd.read_csv(file_name, sep=';', dtype={'STUDENT': str})
return featuresDF
def test_classifier(clf_name, clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
cv_results = pd.DataFrame(clf.cv_results_)
cv_results.to_csv('./results_5/cv_results.csv', sep=';', float_format='%.4f')
y_pred = clf.predict(X_test)
print(clf_name)
print(accuracy_score(y_test, y_pred))
return accuracy_score(y_test, y_pred)
def cross_validate(clf, X, y, features):
group_kfold = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=5)
cv_scores = [clf.fit(X[train], y[train]).score(X[test], y[test])
for train, test in group_kfold.split(X, y, features['FILE'])]
return cv_scores
if __name__ == '__main__':
idx = int(sys.argv[1])
feature_path = '../mfcc_data_19c'
feature_file = sorted(glob(feature_path + '/*.csv'))
feature_file_sorted = sorted(feature_file, key=lambda x: int(x.split('MFCC_')[1].split('.csv')[0]))
print(feature_file_sorted[idx])
feature_file = feature_file_sorted[idx]
features = load_data(feature_file)
no_mfcc = feature_file.split('\\')[-1].strip('.csv').split('_')[-1]
results_file = 'resultsMFCC_{}.csv'.format(no_mfcc)
print(results_file)
results =
|
pd.DataFrame(columns=['No_MFCC', 'Classifier', 'Accuracy'])
|
pandas.DataFrame
|
# Created on 2020/7/15
# This module is for the class TimeSeries and related functions.
# Standard library imports
from datetime import datetime
from typing import Any, Callable, Optional, Union
import warnings
# Third party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
from statsmodels.api import OLS
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from typeguard import typechecked
# Local application imports
from .. import exceptions
# Dictionary of Pandas' Offset Aliases
# and their numbers of appearance in a year.
DPOA = {'D': 365, 'B': 252, 'W': 52,
'SM': 24, 'SMS': 24,
'BM': 12, 'BMS': 12, 'M': 12, 'MS': 12,
'BQ': 4, 'BQS': 4, 'Q': 4, 'QS': 4,
'Y': 1, 'A':1}
# Datetimes format
fmt = "%Y-%m-%d %H:%M:%S"
fmtz = "%Y-%m-%d %H:%M:%S %Z%z"
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
@typechecked
def get_list_timezones() -> None:
"""
Lists all the time zone names that can be used.
"""
print(pytz.all_timezones)
return None
# CLASS Series
@typechecked
class Series:
"""
Abstract class defining a Series and its methods.
This class serves as a parent class for TimeSeries and CatTimeSeries.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
unit : str or None
Unit of the series values.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a pandas.Series or pandas.DataFrame as an argument and initializes the time series.
"""
# Deal with DataFrame / Series
if (data is None) or (data.empty is True):
self.data = pd.Series(index=None, data=None)
self.start_utc = None
self.end_utc = None
self.nvalues = 0
self.freq = None
self.name = 'Empty TimeSeries'
else:
# Making sure the user entered a pandas.Series or pandas.DataFrame
# with just an index and one column for values
if isinstance(data, pd.DataFrame):
if data.shape[1] != 1:
raise AssertionError("Time series must be built from a pandas.Series or a pandas.DataFrame with only one value column.")
else:
self.data = pd.Series(data.iloc[:, 0])
elif not isinstance(data, pd.Series):
raise AssertionError("Time series must be built from a pandas.Series or a pandas.DataFrame with only one value column.")
else:
self.data = data
# Deal with time
            if isinstance(data.index[0], str):
data.index = pd.to_datetime(data.index, format=fmt)
self.start_utc = datetime.strptime(str(data.index[0]), fmt)
self.end_utc = datetime.strptime(str(data.index[-1]), fmt)
self.nvalues = data.shape[0]
else:
self.start_utc = data.index[0]
self.end_utc = data.index[-1]
self.nvalues = data.shape[0]
try:
self.freq = pd.infer_freq(self.data.index)
except:
self.freq = 'Unknown'
# Deal with unit
self.unit = unit
# Deal with timezone
if tz is None:
self.tz = 'UTC'
self.timezone = pytz.utc
else:
self.tz = tz
self.timezone = pytz.timezone(tz)
# Deal with name (nickname)
if name is None:
name = ""
self.name = name
def get_start_date_local(self) -> datetime.date:
"""
Returns the attribute UTC start date in local time zone defined by attribute timezone.
"""
start_tmp = datetime.strptime(str(self.start_utc), fmt).astimezone(self.timezone)
return datetime.strftime(start_tmp, format=fmtz)
def get_end_date_local(self) -> datetime.date:
"""
Returns the attribute UTC end date in local time zone defined by attribute timezone.
"""
end_tmp = datetime.strptime(str(self.end_utc), fmt).astimezone(self.timezone)
return datetime.strftime(end_tmp, format=fmtz)
def specify_data(self,
start: Union[str, datetime.date],
end: Union[str, datetime.date]
) -> Union[pd.Series, pd.DataFrame]:
"""
Returns the appropriate data according to user's specifying
or not the desired start and end dates.
"""
# Prepare data
if (start is None) and (end is None):
data = self.data
elif (start is None) and (end is not None):
data = self.data[:end]
elif (start is not None) and (end is None):
data = self.data[start:]
elif (start is not None) and (end is not None):
data = self.data[start:end]
return data
def start_end_names(self,
start: Union[str, datetime.date],
end: Union[str, datetime.date]
) -> (str, str):
"""
Recasts the time series dates to 10 characters strings
if the date hasn't been re-specified (i.e. value is 'None').
"""
s = str(self.start_utc)[:10] if (start is None) else start
e = str(self.end_utc)[:10] if (end is None) else end
return s, e
def is_sampling_uniform(self) -> bool:
"""
Tests if the sampling of a time series is uniform or not.
Returns a boolean value True when the sampling is uniform, False otherwise.
"""
# Prepare data
sampling = [datetime.timestamp(x) for x in self.data.index]
assert(len(sampling)==self.nvalues)
intervals = [sampling[x] - sampling[x-1] for x in range(1,self.nvalues,1)]
# Testing
prev = intervals[0]
for i in range(1,len(intervals),1):
            if abs(intervals[i] - prev) > 1.e-6:
return False
return True
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
# CLASS TimeSeries
@typechecked
class TimeSeries(Series):
"""
Class defining a time series and its methods.
This class inherits from the parent class 'Series'.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
type : str
Type of the series.
unit : str or None
Unit of the time series values.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a pandas.Series or pandas.DataFrame as an argument and initializes the time series.
"""
super().__init__(data=data, tz=tz, unit=unit, name=name)
# Add attributes initialization if needed
self.type = 'TimeSeries'
### Plot INFORMATION ABOUT THE TIME SERIES ###
def simple_plot(self,
figsize: (float, float) = (12, 5),
dpi: float=100
) -> None:
"""
Plots the time series in a simple way.
Parameters
----------
figsize : 2-tuple of ints
Dimensions of the figure.
dpi : int
Dots-per-inch definition of the figure.
Returns
-------
None
None
"""
# Plot
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(self.data.index, self.data.values, color='k')
# Make it cute
if self.name is None:
title = "Time series from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
else:
title = "Time series " + self.name + " from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
if self.unit is None:
ylabel = 'Value'
else:
ylabel = 'Value (' + self.unit + ')'
plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)
plt.show()
return None
@typechecked
def distribution(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Plots the distribution of values between two dates.
"""
# Prepare data
data = self.specify_data(start, end)
# Plot distribution of values
plt.figure(figsize=figsize, dpi=dpi)
data.hist(bins=bins, grid=False, color='w', lw=2, edgecolor='k')
# Make it cute
s,e = self.start_end_names(start, end)
title = "Distribution of values between " + s + " and " + e
plt.gca().set(title=title, xlabel="Value", ylabel="Hits")
plt.show()
return None
def density(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Plots the density of values between two dates.
"""
# Prepare data
data = self.specify_data(start, end)
s,e = self.start_end_names(start, end)
# Plot distribution of values
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
data.plot.density(color='k', ax=ax, legend=False)
# Make it cute
title = "Density plot of values between " + s + " and " + e
plt.gca().set(title=title, xlabel="Value", ylabel="Density")
plt.show()
return None
def simple_plot_distrib(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (10, 4),
dpi: float=100
) -> None:
"""
Plots the time series and its associated distribution of values between two dates.
"""
# Checks
assert(isinstance(bins,int))
# Prepare data
data = self.specify_data(start, end)
s,e = self.start_end_names(start, end)
# Plot
fig = plt.figure(figsize=figsize, dpi=dpi)
gs = fig.add_gridspec(1, 4)
# Plot 1 - Time Series simple plot
f_ax1 = fig.add_subplot(gs[:, 0:3])
f_ax1.plot(data.index, data.values, color='k')
if self.name is None:
title1 = "Time series from " + s + " to " + e
else:
title1 = "Time series " + self.name + " from " + s + " to " + e
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
if self.unit is None:
ylabel = 'Value'
else:
ylabel = 'Value (' + self.unit + ')'
plt.gca().set(title=title1, xlabel=xlabel, ylabel=ylabel)
# Plot 2 - Distribution of values
f_ax2 = fig.add_subplot(gs[:, 3:])
data.hist(bins=bins, grid=False, ax=f_ax2, orientation="horizontal", color='w', lw=2, edgecolor='k')
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.3, hspace=0)
title2 = "Distribution"
plt.gca().set(title=title2, xlabel=ylabel, ylabel="Hits")
plt.show()
return None
def get_sampling_interval(self) -> Union[str, datetime.date]:
"""
Returns the sampling interval for a uniformly-sampled time series.
"""
        if not self.is_sampling_uniform():
raise exceptions.SamplingError("Time series is not uniformly sampled.")
else:
idx1 = self.data.index[1]
idx0 = self.data.index[0]
intv = datetime.timestamp(idx1) - datetime.timestamp(idx0)
return intv
def lag_plot(self,
lag: int=1,
figsize: (float, float) = (5, 5),
dpi: float=100,
alpha: float=0.5
) -> None:
"""
Returns the scatter plot x_t v.s. x_{t-l}.
"""
# Check
try:
assert(lag>0)
except AssertionError:
raise AssertionError("The lag must be an integer equal or more than 1.")
# Do the plot
fig = plt.figure(figsize=figsize, dpi=dpi)
pd.plotting.lag_plot(self.data, lag=lag, c='black', alpha=alpha)
# Set title
if self.name is None:
tmp_name = " "
else:
tmp_name = self.name
title = "Lag plot of time series " + tmp_name
plt.gca().set(title=title, xlabel="x(t)", ylabel="x(t+"+str(lag)+")")
plt.show()
return None
def lag_plots(self,
nlags: int=5,
figsize: (float, float) = (10, 10),
dpi: float=100,
alpha: float=0.5
) -> None:
"""
Returns a number of scatter plots x_t v.s. x_{t-l}
where l is the lag value taken from [0,...,nlags].
Notes
-----
It is required that nlags > 1.
"""
# Check
try:
assert(nlags>1)
except AssertionError:
raise AssertionError("nlags must be an integer starting from 2.")
# Rule for the number of rows/cols
ncols = int(np.sqrt(nlags))
if(nlags % ncols == 0):
nrows = nlags // ncols
else:
nrows = nlags // ncols + 1
# Do the plots
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True,
figsize=figsize, dpi=dpi)
for i, ax in enumerate(axes.flatten()[:nlags]):
pd.plotting.lag_plot(self.data, lag=i+1, ax=ax, c='black', alpha=alpha)
ax.set_xlabel("x(t)")
ax.set_ylabel("x(t+"+str(i+1)+")")
# Set title
if self.name is None:
tmp_name = " "
else:
tmp_name = self.name
title = "Multiple lag plots of time series " + tmp_name
fig.suptitle(title)
plt.show()
return None
### SIMPLE DATA EXTRACTION ON THE TIME SERIES ###
def hist_avg(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical average of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
avg = data.values.mean()
return avg
def hist_std(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical standard deviation of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
std = data.values.std()
return std
def hist_variance(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical variance of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
var = data.values.var()
return var
def hist_skewness(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical skew of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
skew = stats.skew(data.values)
return skew
def hist_kurtosis(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical (Fisher) kurtosis of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
kurt = stats.kurtosis(data.values, fisher=False)
return kurt
def min(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the minimum of the series.
"""
data = self.specify_data(start, end)
ts_min = data.values.min()
return ts_min
def max(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the maximum of the series.
"""
data = self.specify_data(start, end)
ts_max = data.values.max()
return ts_max
def describe(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> None:
"""
Returns description of time series between two dates.
This uses the pandas function having same name.
"""
data = self.specify_data(start, end)
print(data.describe())
return None
### METHODS THAT ARE CLOSER TO FINANCIAL APPLICATIONS ###
def percent_change(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns the percent change of the series (in %).
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from the time series.
"""
data = self.specify_data(start, end)
new_data = data.pct_change()
new_ts = TimeSeries(data=new_data[1:], tz=self.tz, unit='%', name=name)
return new_ts
# Alias method of percent_change()
# For people with a Finance terminology preference
net_returns = percent_change
def gross_returns(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns the gross returns of the series (in %),
i.e. percent change + 1.
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from the time series.
"""
data = self.specify_data(start, end)
new_data = 1 + data.pct_change()
new_ts = TimeSeries(new_data[1:], tz=self.tz, name=name)
return new_ts
def hist_vol(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Computes the net returns of the time series and
returns their associated historical volatility
between two dates (default is the whole series).
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from calculation.
        Note that pandas.Series.pct_change() returns fractional
        changes (not percentages), so no rescaling is applied here.
"""
# Initialization
data = self.specify_data(start, end)
# Warning message
if (self.is_sampling_uniform() is not True) and (verbose is True):
warnings.warn("Index not uniformly sampled. Result could be meaningless.")
# Warning message
if (0. in data.values) and (verbose is True):
warnings.warn("Zero value in time series, will generate infinite return.")
# Computing net returns
net_returns = data.pct_change()[1:]
# Compute standard deviation, i.e. volatility
std = net_returns.values.std()
return std
def annualized_vol(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the annualized volatility of the time series
between two dates (default is the whole series),
using the frequency of the time series when usable.
"""
# Initializations
hvol = self.hist_vol(start, end, verbose=verbose)
if (self.freq is not None) and (self.freq in DPOA.keys()):
return hvol * np.sqrt(DPOA[self.freq])
else:
raise ValueError('Annualized volatility could not be evaluated.')
def annualized_return(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the annualized return of the time series
between two dates (default is the whole series),
using the frequency of the time series when usable.
Arguments
---------
start : str or datetime
Starting date of selection.
end : str or datetime
Ending date of selection.
verbose : bool
Verbose option.
Returns
-------
float
Annualized return.
"""
# Initializations
gross_returns = self.gross_returns(start, end)
# Compute product of values
prd = gross_returns.data.prod()
# Checks
if (start is None) and (end is None):
assert(gross_returns.nvalues == self.nvalues-1)
if (gross_returns.freq != self.freq) and (verbose is True):
warning_message = "Gross_returns frequency and time series frequency do not match." \
+ " In that context, results may be meaningless."
warnings.warn(warning_message)
if (self.freq is not None) and (self.freq in DPOA.keys()):
return prd**(DPOA[self.freq]/gross_returns.nvalues) - 1
else:
raise ValueError('Annualized return could not be evaluated.')
def risk_ratio(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the risk ratio, i.e. the ratio of annualized return
over annualized volatility.
"""
ann_return = self.annualized_return(start, end)
ann_volatility = self.annualized_vol(start, end, verbose=verbose)
return ann_return / ann_volatility
def annualized_Sharpe_ratio(self,
risk_free_rate: float=0,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the Sharpe ratio, also known as risk adjusted return.
"""
ann_return = self.annualized_return(start, end)
ann_volatility = self.annualized_vol(start, end, verbose=verbose)
return (ann_return - risk_free_rate) / ann_volatility
### METHODS RELATED TO VALUE AT RISK ###
def hist_var(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the historical p-VaR (Value at Risk) between two dates.
Returns
-------
float
VaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
if 100 * p % 1 != 0:
warning_message = f"Probability too precise, only closest percentile computed here." \
+ f"Hence for p = {str(p)} , percentile estimation is based on p = {str(int(100 * p))} %."
warnings.warn(warning_message)
# Prepare data
data = self.specify_data(start, end)
return np.percentile(data.values, int(100*p))
def hist_cvar(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the historical CVaR (Conditional Value at Risk) between two dates.
This quantity is also known as the Expected Shortfall (ES).
Returns
-------
float
CVaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
if 100*p%1 != 0:
warning_message = "Probability too precise, only closest percentile computed here." \
+ "Hence for p = " + str(p) + " , percentile estimation is based on p = " + str(int(100*p)) + " %."
warnings.warn(warning_message)
# Prepare data
data = self.specify_data(start, end)
var = self.hist_var(p=p, start=start, end=end)
# Computing CVaR
tmp_sum = 0
tmp_n = 0
for val in data.values:
if val <= var:
tmp_sum += val
tmp_n += 1
return tmp_sum / tmp_n
# Alias method of hist_cvar
# For people with a Finance terminology preference
hist_expected_shortfall = hist_cvar
def cornish_fisher_var(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the VaR (Value at Risk) between two dates from
the Cornish-Fisher expansion.
Returns
-------
float
VaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
# Prepare data
data = self.specify_data(start, end)
# Compute z-score based on normal distribution
z = stats.norm.ppf(p)
# Compute modified z-score from expansion
s = stats.skew(data.values)
k = stats.kurtosis(data.values, fisher=False)
new_z = z + (z**2 - 1) * s/6 + (z**3 - 3*z) * (k-3)/24 \
- (2*z**3 - 5*z) * (s**2)/36
return data.values.mean() + new_z * data.values.std(ddof=0)
### AUTOCORRELATION COMPUTATION ###
def autocorrelation(self,
lag: int=1,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the autocorrelation of the time series for a specified lag.
We use the function:
        $\rho_l = \frac{Cov(x_t, x_{t-l})}{\sqrt{Var[x_t] \, Var[x_{t-l}]}}$
        where $x_t$ is the time series at time t,
        Cov denotes the covariance and Var the variance.
        We also use the properties $\rho_0 = 1$ and $\rho_{-l} = \rho_l$
        (using LaTeX notations here).
"""
# Initialization
l = abs(lag)
# Trivial case
if l==0:
return 1
# Prepare data
data = self.specify_data(start, end)
# General case
assert(l < data.shape[0])
shifted_data = data.shift(l)
mu = data.mean()
sigma = data.std()
numerator = np.mean((data - mu) * (shifted_data - mu))
denominator = sigma**2
return numerator / denominator
def plot_autocorrelation(self,
lag_min: int=0,
lag_max: int=25,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Uses autocorrelation method in order to return a plot
of the autocorrelation againts the lag values.
"""
# Checks
assert(lag_max > lag_min)
# Computing autocorrelation
x_range = list(range(lag_min, lag_max+1, 1))
ac = [self.autocorrelation(lag=x, start=start, end=end) for x in x_range]
# Plot
plt.figure(figsize=figsize, dpi=dpi)
plt.bar(x_range, ac, color='w', lw=2, edgecolor='k')
s,e = self.start_end_names(start, end)
title = "Autocorrelation from " + s + " to " + e + " for lags = [" \
+ str(lag_min) + "," + str(lag_max) + "]"
plt.gca().set(title=title, xlabel="Lag", ylabel="Autocorrelation Value")
plt.show()
return None
def acf_pacf(self,
lag_max: int=25,
figsize: (float, float) = (12, 3),
dpi: float=100
) -> None:
"""
Returns a plot of the AutoCorrelation Function (ACF)
and Partial AutoCorrelation Function (PACF) from statsmodels.
"""
# Plot
fig, axes = plt.subplots(1,2, figsize=figsize, dpi=dpi)
plot_acf(self.data.values.tolist(), lags=lag_max, ax=axes[0])
plot_pacf(self.data.values.tolist(), lags=lag_max, ax=axes[1])
plt.show()
return None
### SIMPLE TRANSFORMATIONS OF THE TIME SERIES TO CREATE A NEW TIME SERIES ###
    def trim(self,
             new_start: Union[str, datetime.date],
             new_end: Union[str, datetime.date],
             name: str=None
             ) -> 'TimeSeries':
        """
        Method that trims the time series to the desired dates
        and sends back a new time series.
        """
new_data = self.data[new_start:new_end]
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
    def add_cst(self,
                cst: float=0,
                name: str=None
                ) -> 'TimeSeries':
"""
Method that adds a constant to the time series.
"""
new_data = self.data + cst
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
    def mult_by_cst(self,
                    cst: float=1,
                    name: str=None
                    ) -> 'TimeSeries':
"""
Method that multiplies the time series by a constant.
"""
new_data = self.data * cst
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
    def linear_combination(self,
                           other_ts: 'TimeSeries',
                           factor1: float=1,
                           factor2: float=1,
                           name: str=None
                           ) -> 'TimeSeries':
"""
Method that adds a time series to the current one
according to linear combination:
factor1 * current_ts + factor2 * other_ts.
"""
# Checks
if (self.unit != other_ts.unit):
raise AssertionError("Time series to combine must have same unit.")
# Compute linear combination
new_data = factor1 * np.array(self.data.values) + factor2 * np.array(other_ts.data.values)
new_data = pd.Series(index=self.data.index, data=new_data)
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def convolve(self,
func: Callable[[float], float],
x_min: float,
x_max: float,
n_points: int,
normalize: bool=False,
name: str=None
) -> 'TimeSeries':
"""
Performs a convolution of the time series with a function 'func'.
The 'normalize' option allows to renormalize 'func' such that
the sum of its values is one.
Parameters
----------
func : function
Function we want to employ for convolution.
x_min : float
Minimum value to consider for 'func'.
x_max : float
Maximum value to consider for 'func'.
n_points : int
Number of points to consider in the function.
normalize: bool
Option to impose the sum of func values to be 1.
name : str
New name.
Returns
-------
TimeSeries
Convolved time series.
"""
# Getting the time series values
ts_vals = self.data.values
# Getting the convolving function values
X = np.linspace(x_min, x_max, n_points)
func_vals = []
for x in X:
func_vals.append(func(x))
        if normalize:
            func_vals = np.array(func_vals)
            func_vals = func_vals / func_vals.sum()
        # Dealing with name
        if name is None:
            name = self.name + '-Convolved'
        # Generate convolved values
        convolved_vals = np.convolve(func_vals, ts_vals.flatten(), mode='same')
convolved_ts = TimeSeries(data=pd.Series(index=self.data.index, data=convolved_vals),
tz=self.tz,
unit=self.unit,
name=name)
return convolved_ts
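    # Example (illustrative, assumed usage): convolving with a constant kernel and
    # normalize=True yields a centred moving average of the series, e.g.
    #     ts.convolve(func=lambda x: 1., x_min=-1., x_max=1., n_points=5, normalize=True)
    # where `ts` is an existing TimeSeries instance.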
def get_drawdowns(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Computes the drawdowns and returns a new time series from them.
Returns
-------
TimeSeries
Time series of the drawdowns.
"""
# Prepare data
data = self.specify_data(start, end)
# Compute drawdowns
trailing_max = data.cummax()
drawdowns = (data - trailing_max) / trailing_max
# Make a time series from them
new_ts = TimeSeries(data=drawdowns, tz=self.tz, name=name)
return new_ts
def max_drawdown(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the maximum drawdown of a time series.
Returns
-------
float
Maximum drawdown.
"""
# Prepare data
data = self.specify_data(start, end)
# Compute drawdowns
trailing_max = data.cummax()
drawdowns = (data - trailing_max) / trailing_max
max_drawdowns = -drawdowns.values.min()
return max_drawdowns
def divide_by_timeseries(self,
other_ts: 'TimeSeries',
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns a time series from the division of the current time series
with another time series (current_ts / other_ts).
Returns
-------
TimeSeries
Division time series.
"""
# Prepare data
data = self.specify_data(start, end)
# Check that data has the same index
# as the dividing time series
assert(data.index.tolist() == other_ts.data.index.tolist())
# Do the division
new_data = np.array(data.values) / np.array(other_ts.data.values)
        new_data = pd.Series(index=data.index, data=new_data)
        new_ts = TimeSeries(data=new_data, tz=self.tz, name=name)
        return new_ts
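
if __name__ == '__main__':
    # Minimal illustrative sketch (not part of the original module): build a small
    # daily TimeSeries and query a few of its statistics. Values are made up.
    demo_index = pd.date_range('2020-01-01', periods=5, freq='D')
    demo_series = pd.Series(index=demo_index, data=[100., 101., 99.5, 102., 103.])
    demo_ts = TimeSeries(data=demo_series, unit='$', name='demo')
    print(demo_ts.hist_avg(), demo_ts.hist_std())
    print(demo_ts.annualized_return())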
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import DBSCAN
class ClusterBound:
def __init__(self, x1, y1, w, h):
self.x1 = x1
self.x2 = x1 + w
self.y1 = y1
self.y2 = y1 + h
def contains(self, xp, yp):
return self.x1 <= xp <= self.x2 and self.y1 <= yp <= self.y2
class ManualClusterModel():
def __init__(self, cluster_bounds):
self.cluster_bounds = cluster_bounds
def fit(self, X):
def find_cluster(x, cluster_bounds):
for i, c in enumerate(cluster_bounds):
if c.contains(x[0], x[1]):
return i
return -1
self.labels_ = X.apply(lambda x: find_cluster(x, self.cluster_bounds), axis=1)
MirageCalloutClusteringModel = ManualClusterModel([
ClusterBound(162, 169, 64, 65), # van
ClusterBound(227, 173, 32, 41), # b plat
ClusterBound(259, 173, 89, 40), # b front apt
ClusterBound(112, 231, 49, 93), # bench
ClusterBound(162, 214, 167, 41), # b default out of site
ClusterBound(203, 254, 68, 75), # b site
ClusterBound(170, 395, 32, 90), # kitchen door
ClusterBound(207, 396, 133, 90), # kitchen
ClusterBound(342, 234, 54, 46), # side cat
ClusterBound(342, 280, 160, 45), # cat site
ClusterBound(430, 328, 28, 119), # underpass
ClusterBound(463, 409, 218, 38), # cat
ClusterBound(396, 435, 32, 62), # window
ClusterBound(433, 446, 60, 59), # bottom mid
ClusterBound(495, 448, 59, 56), # mid mid
ClusterBound(556, 447, 131, 56), # top mid
ClusterBound(682, 313, 69, 124), # top top mid
ClusterBound(712, 440, 39, 59), # boxes
ClusterBound(383, 571, 84, 79), # jungle
ClusterBound(482, 508, 65, 91), # connector
ClusterBound(573, 504, 179, 28), # mid chair
ClusterBound(469, 601, 66, 54), # connector by stairs
ClusterBound(538, 601, 29, 69), # stairs
ClusterBound(643, 696, 42, 86), # palace deck/shadow
ClusterBound(382, 498, 45, 71), # mid window hidden
ClusterBound(648, 783, 50, 40), # front palace
ClusterBound(441, 827, 43, 49), # ticket booth
ClusterBound(319, 772, 149, 56), # ct
ClusterBound(164, 332, 175, 60), # b market side
ClusterBound(692, 627, 127, 57), # A ramp
ClusterBound(568, 646, 30, 20), # sandwich
ClusterBound(617, 624, 37, 29), # tetris
ClusterBound(480, 741, 42, 47), # triple box
ClusterBound(579, 791, 51, 35), # firebox
ClusterBound(521, 737, 93, 51), # front a site
ClusterBound(479, 671, 158, 65), # open a site
ClusterBound(463, 329, 52, 79) # b short
])
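# Illustrative sketch (assumed usage): assigning image-space points to callout clusters.
# ManualClusterModel.fit reads each row positionally (x first, then y), mirroring how
# cluster_positions() builds its input below, so column order matters.
#     pts = pd.concat([pd.Series([230.0, 700.0], name='pos_x'),
#                      pd.Series([280.0, 450.0], name='pos_y')], axis=1)
#     MirageCalloutClusteringModel.fit(pts)
#     MirageCalloutClusteringModel.labels_   # -1 means no callout bound matched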
#Convert map coordinates to image coordinates, from <NAME>'s analysis
def pointx_to_resolutionx(xinput,startX=-3217,endX=1912,resX=1024):
sizeX = endX - startX
if startX < 0:
xinput += startX * (-1.0)
else:
xinput += startX
    xoutput = float((xinput / abs(sizeX)) * resX)
return xoutput
def pointy_to_resolutiony(yinput,startY=-3401,endY=1682,resY=1024):
sizeY=endY-startY
if startY < 0:
yinput += startY *(-1.0)
else:
yinput += startY
    youtput = float((yinput / abs(sizeY)) * resY)
return resY-youtput
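# Example (computed from the default extents above): the in-game origin (0, 0) lands
# at roughly pixel (642, 339) on the 1024x1024 radar image, i.e.
#     pointx_to_resolutionx(0) ~= 642.3 and pointy_to_resolutiony(0) ~= 338.8.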
def cluster_positions(firefight_df, cluster_map, verbose=False, scale=True):
"""
Clusters the dataframe spatially into common positions by type of position, map, and team. Clusters DMG_VIC and DMG_ATT together.
Input:
cluster_df: result of DataLoader.load_firefight_df, with columns ['file_round', 'seconds', 'pos_x', 'pos_y', 'hp_dmg']
eps_map: the eps to use for DBSCAN for each pos_type
Output:
the input cluster_df, with new columns ['pos_cluster']
"""
min_max_scaler = MinMaxScaler()
cluster_df = firefight_df.copy()
if scale:
cluster_df[["pos_x", "pos_y"]] = min_max_scaler.fit_transform(cluster_df[["pos_x", "pos_y"]])
cluster_df['pos_cluster'] = None
for map_name in cluster_df['map'].unique():
for team in cluster_df['att_side'].unique():
# Cluster nade positions
for pos_type in [t for t in cluster_df['pos_type'].unique() if t not in ['DMG_VIC', 'DMG_ATT']]:
mask = (cluster_df['map'] == map_name) & (cluster_df['pos_type'] == pos_type) & (cluster_df['att_side'] == team)
group = cluster_df[mask]
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = cluster_map[pos_type]
#cluster_model = DBSCAN(eps=0.05, min_samples=min_samples)
pts = pd.concat([group['pos_x'], group['pos_y']], axis=1)
cluster_model.fit(pts)
firefight_df.loc[mask, 'pos_cluster'] = cluster_model.labels_
if verbose:
print(f"{team}, {pos_type}, {map_name}: {np.unique(cluster_model.labels_, return_counts=True)}")
# Cluster attack/victim positions
print(cluster_df['pos_type'].unique())
mask = ((cluster_df['pos_type'] == 'DMG_VIC') | (cluster_df['pos_type'] == 'DMG_ATT')) & (cluster_df['att_side'] == team) & (cluster_df['map'] == map_name)
group = cluster_df[mask]
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = cluster_map['DMG']
#cluster_model = DBSCAN(eps=0.05, min_samples=min_samples)
pts = pd.concat([group['pos_x'], group['pos_y']], axis=1)
cluster_model.fit(pts)
firefight_df.loc[mask, 'pos_cluster'] = cluster_model.labels_
if verbose:
print(f"{team}, DMG, {map_name}: {np.unique(cluster_model.labels_, return_counts=True)}")
return firefight_df
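# Illustrative usage note (assumed data): `cluster_positions` expects one model per
# position type in `cluster_map`, e.g.
#     cluster_map = {'NADE_SMOKE': DBSCAN(eps=0.05, min_samples=6),
#                    'DMG': MirageCalloutClusteringModel}
#     labelled = cluster_positions(firefight_df, cluster_map)
# Grenade positions are clustered per map/side/pos_type, while attacker and victim
# damage positions ('DMG_ATT'/'DMG_VIC') share the single 'DMG' model.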
def cluster_firefights(firefight_df, eps=0.08, min_samples=6, n_seconds_equiv_to_quarter_map=20, verbose=False, return_scaled=False):
"""
Clusters the dataframe spatio-temporally into "firefights" - groups of points within a round that
are within a similar space and time. Also calculates the net damage taken by either team within each
firefight.
Input:
cluster_df: result of DataLoader.load_firefight_df, with columns ['file_round', 'seconds', 'pos_x', 'pos_y', 'hp_dmg']
eps: the eps to use for DBSCAN
min_samples: the min_samples to use for DBSCAN
n_seconds_equiv_to_quarter_map: The number of seconds considered equivalent to a quarter of the map when clustering.
Output:
the input cluster_df, with new columns ['firefight_cluster', 'firefight_net_t_dmg', ''firefight_net_ct_dmg']
"""
max_round_length = firefight_df['seconds'].max()
min_max_scaler = MinMaxScaler()
cluster_df = firefight_df.copy()
cluster_df[["seconds", "pos_x", "pos_y"]] = min_max_scaler.fit_transform(cluster_df[["seconds", "pos_x", "pos_y"]])
# scale time so that 20 seconds is roughly equivalent to one quarter of the map
cluster_df['seconds'] *= (max_round_length/n_seconds_equiv_to_quarter_map) * (1/4)
# cluster firefights spatio-temporally
firefight_df['firefight_cluster'] = None
cluster_df['firefight_cluster'] = None
firefight_df['firefight_net_t_dmg'] = None
firefight_df['firefight_net_ct_dmg'] = None
num_filerounds = len(cluster_df['file_round'].unique())
for i, (name, group) in enumerate(cluster_df.groupby('file_round')):
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = DBSCAN(eps=eps, min_samples=min_samples)
pts = pd.concat([group['seconds'], group['pos_x'], group['pos_y']], axis=1)
cluster_model.fit(pts)
cluster_df.loc[(firefight_df['file_round'] == name), 'firefight_cluster'] = cluster_model.labels_
firefight_df.loc[(firefight_df['file_round'] == name), 'firefight_cluster'] = cluster_model.labels_
if verbose:
print(f"{i}/{num_filerounds}, {name}: {np.unique(cluster_model.labels_)}")
# Find net damage for each firefight
for name, group in cluster_df.groupby(['file_round', 'firefight_cluster']):
ct_att_pts = group[(group['pos_type'] == 'DMG_VIC') & (group['att_side'] == 'CounterTerrorist')]
t_att_pts = group[(group['pos_type'] == 'DMG_VIC') & (group['att_side'] == 'Terrorist')]
t_net_dmg = np.sum(t_att_pts['hp_dmg'])
ct_net_dmg = np.sum(ct_att_pts['hp_dmg'])
mask = (firefight_df['file_round'] == name[0]) & (firefight_df['firefight_cluster'] == name[1])
firefight_df.loc[mask, 'firefight_net_t_dmg'] = t_net_dmg
firefight_df.loc[mask, 'firefight_net_ct_dmg'] = ct_net_dmg
cluster_df.loc[mask, 'firefight_net_t_dmg'] = t_net_dmg
cluster_df.loc[mask, 'firefight_net_ct_dmg'] = ct_net_dmg
if verbose:
print(f"{name}: t_dmg={t_net_dmg}, ct_dmg={ct_net_dmg}")
if return_scaled:
return cluster_df
return firefight_df
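
if __name__ == '__main__':
    # Minimal illustrative sketch (assumed synthetic data, not the original pipeline):
    # spatio-temporal clustering of damage events from a single round into firefights.
    # eps/min_samples are lowered to suit the tiny sample.
    demo_ff = pd.DataFrame({
        'file_round': ['demo.dem_1'] * 6,
        'seconds': [5, 6, 7, 40, 41, 42],
        'pos_x': [100, 101, 102, 500, 501, 502],
        'pos_y': [200, 201, 202, 600, 601, 602],
        'pos_type': ['DMG_VIC'] * 6,
        'att_side': ['Terrorist', 'Terrorist', 'CounterTerrorist'] * 2,
        'hp_dmg': [20, 30, 15, 40, 10, 25],
    })
    out = cluster_firefights(demo_ff, eps=0.3, min_samples=2, verbose=True)
    print(out[['firefight_cluster', 'firefight_net_t_dmg', 'firefight_net_ct_dmg']])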
class DataLoader:
def __init__(self, use_data_pt2=False):
self.use_data_pt2 = use_data_pt2
def load_map_df(self):
map_df = pd.read_csv('../data/map_data.csv')
map_df = map_df.rename( columns={'Unnamed: 0':'map_name'}).set_index('map_name')
return map_df
def load_meta_df(self):
meta_df = pd.read_csv('../data/esea_meta_demos.part1.csv')
if self.use_data_pt2:
meta_df = meta_df.append(pd.read_csv('../data/esea_meta_demos.part2.csv'))
meta_df = meta_df[['file', 'map', 'round', 'start_seconds', 'winner_side', 'round_type', 'ct_eq_val', 't_eq_val']]
return meta_df
def load_dmg_df(self, nrows=None, scale_to_map=True, dropna=True, map_name=None):
dmg_df = pd.read_csv('../data/esea_master_dmg_demos.part1.csv', nrows=nrows)
if self.use_data_pt2:
dmg_df = dmg_df.append(pd.read_csv('../data/esea_master_kills_demos.part2.csv', nrows=None if nrows is None else nrows - len(dmg_df)))
dmg_df = dmg_df[['file', 'round', 'seconds', 'att_side', 'vic_side', 'is_bomb_planted', 'bomb_site', 'hp_dmg', 'arm_dmg', 'hitbox', 'wp', 'wp_type', 'att_id', 'vic_id', 'att_pos_x', 'att_pos_y', 'vic_pos_x', 'vic_pos_y']]
meta_df = self.load_meta_df()
dmg_df = pd.merge(dmg_df, meta_df, how='left', left_on=['file','round'], right_on = ['file','round'])
if map_name is not None:
dmg_df = dmg_df[dmg_df['map'] == map_name]
dmg_df['seconds'] -= dmg_df['start_seconds']
dmg_df = dmg_df.drop(columns=['start_seconds'])
if scale_to_map:
map_df = self.load_map_df()
for map_info in map_df.iterrows():
map_name = map_info[0]
map_data = map_info[1]
mask = (dmg_df['map'] == map_name)
                # Use a separate name so we don't rebind map_df while iterating over it
                masked_df = dmg_df[mask]
                dmg_df.loc[mask, 'att_pos_y'] = masked_df['att_pos_y'].apply(pointy_to_resolutiony, args=(map_data['StartY'], map_data['EndY'], map_data['ResY']))
                dmg_df.loc[mask, 'att_pos_x'] = masked_df['att_pos_x'].apply(pointx_to_resolutionx, args=(map_data['StartX'], map_data['EndX'], map_data['ResX']))
                dmg_df.loc[mask, 'vic_pos_y'] = masked_df['vic_pos_y'].apply(pointy_to_resolutiony, args=(map_data['StartY'], map_data['EndY'], map_data['ResY']))
                dmg_df.loc[mask, 'vic_pos_x'] = masked_df['vic_pos_x'].apply(pointx_to_resolutionx, args=(map_data['StartX'], map_data['EndX'], map_data['ResX']))
if dropna:
dmg_df = dmg_df.dropna()
return dmg_df
def load_kill_df(self, nrows=None):
kill_df = pd.read_csv('../data/esea_master_dmg_demos.part1.csv', nrows=nrows)
if self.use_data_pt2:
            kill_df = kill_df.append(
                pd.read_csv('../data/esea_master_kills_demos.part2.csv'))
        return kill_df
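# Illustrative usage (assumed paths/data): a typical entry point for the analysis is
#     loader = DataLoader(use_data_pt2=False)
#     dmg_df = loader.load_dmg_df(nrows=100000, map_name='de_mirage')
# which returns damage events joined with round metadata, with positions rescaled to
# 1024x1024 map-image coordinates when scale_to_map=True.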
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with pytest.raises(ValueError):
df.unstack('c1')
with pytest.raises(ValueError):
df.T.stack('c1')
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
columns=exp_col)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
axis=1),
columns=idx)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# With mixed dtype and NaN
levels = [['a', 2, 'c'], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, codes)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
[np.nan, 'a', 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16],
[np.nan, 5, 1], [np.nan, 'a', 2]))
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = pd.DataFrame([[2010, 'a', 'I'],
[2011, 'b', 'II']],
columns=['A', 'B', 'C'])
ind = df.set_index(['A', 'B', 'C'], drop=False)
selection = ind.loc[(slice(None), slice(None), 'I'), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product([expected.columns, ['I']],
names=[None, 'C'])
expected.index = expected.index.droplevel('C')
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', np.nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [np.nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [np.nan] * 3 +
['z'] * 4 + [np.nan] * 3 + ['x'] * 3 + [np.nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([np.nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
        assert_frame_equal(left, right)
"""
Written by <NAME>, UC Berkeley/ Lawrence Berkeley National Labs, NSDS Lab
<NAME>, UC Berkeley
This code is intended to create and implement structure supervised classification of coarsely
segmented trial behavior from the ReachMaster experimental system.
Functions are designed to work with a classifier of your choice.
Operates on a single block.
Edited: 9/14/2021
Required Folder 'DataFrames" with all kin and exp datafiles
"""
import argparse
import os
import matplotlib.pyplot as plt
import sklearn
from scipy import ndimage
import Classification_Utils as CU
import pandas as pd
import numpy as np
import h5py
import random
import joblib # for saving sklearn models
from imblearn.over_sampling import SMOTE # for adjusting class imbalances
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from imblearn.pipeline import Pipeline as imblearn_Pipeline
from collections import Counter
# classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, train_test_split, GridSearchCV, cross_validate, cross_val_score
from sklearn.pipeline import make_pipeline, Pipeline
# from imblearn.pipeline import Pipeline as imblearnPipeline
from sklearn.feature_selection import SelectKBest # feature selection
from sklearn.feature_selection import f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.neural_network import MLPClassifier
# set global random seed for reproducibility #
random.seed(246810)
np.random.seed(246810)
# Create folder in CWD to save data and plots #
current_directory = os.getcwd()
folder_name = 'ClassifyTrials'
final_directory = os.path.join(current_directory, folder_name)
if not os.path.exists(final_directory):
os.makedirs(final_directory)
class ReachClassifier:
# set random set for reproducibility
random.seed(246810)
np.random.seed(246810)
def __init__(self, model=None):
self.model = model
self.X = None
self.y = None
self.X_train = None
self.y_train = None
self.X_val = None
self.y_val = None
self.fs = None
def set_model(self, data):
self.model = data
def set_X(self, data):
self.X = data
def set_y(self, data):
self.y = data
def set_X_train(self, data):
self.X_train = data
def set_y_train(self, data):
self.y_train = data
def set_X_val(self, data):
self.X_val = data
def set_y_val(self, data):
self.y_val = data
def set_fs(self, data):
self.fs = data
def fit(self, X, y):
"""
Fits model to data.
Args:
X: features
y: labels
Returns: None
"""
self.model.fit(X, y)
def predict(self, X):
"""
Returns trained model predictions.
Args:
X: features
y: labels
Returns: preds
"""
return self.model.predict(X)
@staticmethod
def partition(X, y):
"""
Partitions data.
Args:
X: features
y: labels
Returns: X_train, X_val, y_train, y_val
"""
# partition into validation set
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
return X_train, X_val, y_train, y_val
@staticmethod
def evaluate(model, X, y):
"""
Performs 5-fold cross-validation and returns accuracy.
Args:
model: sklearn model
X: features
y: labels
Returns: avg_train_accuracy, avg_test_accuracy
"""
print("Cross validation:")
cv_results = cross_validate(model, X, y, cv=5, return_train_score=True)
train_results = cv_results['train_score']
test_results = cv_results['test_score']
avg_train_accuracy = sum(train_results) / len(train_results)
avg_test_accuracy = sum(test_results) / len(test_results)
print('averaged train accuracy:', avg_train_accuracy)
print('averaged validation accuracy:', avg_test_accuracy)
return avg_train_accuracy, avg_test_accuracy
@staticmethod
def adjust_class_imbalance(X, y):
"""
Adjusts for class imbalance.
        Over-samples the minority class(es) with SMOTE, which synthesises new minority samples by
        interpolating between existing minority neighbours; the random undersampling step is currently disabled.
Returns: new samples
References: https://machinelearningmastery.com/smote-oversampling-for-imbalanced-classification/
"""
oversampler = SMOTE(random_state=42)
# undersampler = RandomUnderSampler(random_state=42)
steps = [('o', oversampler)] # , ('u', undersampler)]
pipeline = imblearn_Pipeline(steps=steps)
X_res, y_res = pipeline.fit_resample(X, y)
return X_res, y_res
@staticmethod
def hyperparameter_tuning(X_train, X_val, y_train, y_val, model, param_grid, fullGridSearch=False):
"""
Performs hyperparameter tuning and returns best trained model.
Args:
model: sklearn
param_grid: grid of models and hyperparameters
fullGridSearch: True to run exhaustive param search, False runs RandomizedSearchCV
Returns:
tuned model
parameters found through search
accuracy of tuned model
Reference: https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74
"""
# Use the random grid to search for best hyperparameters
if fullGridSearch:
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=model, param_grid=param_grid,
cv=3, n_jobs=-1, verbose=2)
else:
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
grid_search = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=2, cv=5,
random_state=42, verbose=2, n_jobs=-1)
# Fit the random search model
grid_search.fit(X_train, y_train)
base_model = RandomForestClassifier()
base_model.fit(X_train, y_train)
base_train_accuracy, base_test_accuracy = ReachClassifier.evaluate(base_model, X_val, y_val)
best_grid = grid_search
best_model = grid_search.best_estimator_
best_train_accuracy, best_test_accuracy = ReachClassifier.evaluate(best_model, X_val, y_val)
        print('Improvement of {:.2f}%'.format(100 * (best_test_accuracy - base_test_accuracy) / base_test_accuracy))
return best_model, best_grid.best_params_, best_test_accuracy
@staticmethod
def mean_df(df):
"""
Maps np.mean to all cells in df. For generating features.
Args:
df: (df)
Returns: df with mean of each cell as its values
"""
mean_df = df.applymap(np.mean)
return mean_df
@staticmethod
def do_feature_selection(X, y, k):
"""
Defines the feature selection and applies the feature selection procedure to the dataset.
Fit to data, then transform it.
Args:
k: top number of features to select
Returns: (array shape trials x k features) subset of the selected input features and feature estimator
references: https://machinelearningmastery.com/feature-selection-with-numerical-input-data/
"""
# configure to select a subset of features
fs = SelectKBest(score_func=f_classif, k=k)
# learn relationship from training data
fs.fit(X, y)
# transform train input data
X_train_fs = fs.transform(X)
return X_train_fs, fs
@staticmethod
def plot_features(fs, X):
"""
Plots and saves feature importances.
Returns: None
"""
for i in range(len(fs.scores_)):
print('Feature %d: %f' % (i, fs.scores_[i]))
# plot the scores
# x = [i for i in range(len(fs.scores_))]
x = X.columns
plt.bar(x, fs.scores_)
# rotate x axis to avoid overlap
plt.xticks(rotation=45)
plt.yticks(rotation=90)
plt.title("Input Features vs. Feature Importance")
plt.ylabel("Mutual Information Feature Importance")
plt.xlabel("Input Features")
plt.savefig(f'{folder_name}/feat_importance.png')
@staticmethod
def pre_classify(X, y, k=10):
"""
Partitions, adjusts class imbalance, and performs feature selection.
Args:
X: features
y: labels
k: (int) number of features to select
Returns: data ready for ML classification
"""
# adjust class imbalance
X_res, y_res = ReachClassifier.adjust_class_imbalance(X, y)
# feat selection
X_selected, fs = ReachClassifier.do_feature_selection(X_res, y_res, k)
return X_selected, y_res, fs
@staticmethod
def train_and_validate(X, y, param_grid, save=True, filename=None):
"""
Trains and Validates.
Args:
X: features
y: labels
param_grid: model and hyperparameters to search over
save: (bool) True to save model
filename: (str) name of model to save as
Returns: trained model, train model's CV score
"""
# partition
X_train, X_val, y_train, y_val = ReachClassifier.partition(X, y)
# hyperparameter and model tuning
base_model = Pipeline(steps=[('standardscaler', StandardScaler()),
('classifier', RandomForestClassifier())])
best_model, best_params_, best_test_accuracy = ReachClassifier.hyperparameter_tuning(
X_train, X_val, y_train, y_val, base_model, param_grid, fullGridSearch=False)
# fit and validate
best_model.fit(X_train, y_train)
_, val_score = ReachClassifier.evaluate(best_model, X_val, y_val)
# fit on all training data
best_model.fit(X, y)
# save model
if save:
joblib.dump(best_model, f"{filename}.joblib")
# print("MODEL SCORE", best_model.score(X_val_selected, y_val))
print("BEST MODEL", best_model)
print("CV SCORE", val_score)
return best_model, val_score
class ClassificationHierarchy:
random.seed(246810)
np.random.seed(246810)
def __init__(self):
pass
def split(self, preds, X, y, onesGoLeft=True):
"""
Splits X and y based on predictions.
Args:
preds: (list of ints) predictions of ones and zeros
X: features
y: labels
onesGoLeft: (bool) True for labels with prediction 1 to be on LHS.
Returns: split X, y data
"""
row_mask = list(map(bool, preds)) # True for 1, False otherwise
negate_row_mask = ~np.array(row_mask) # True for 0, False otherwise
if onesGoLeft:
X_left = X[row_mask]
y_left = y[row_mask]
X_right = X[negate_row_mask]
y_right = y[negate_row_mask]
else:
X_right = X[row_mask]
y_right = y[row_mask]
X_left = X[negate_row_mask]
y_left = y[negate_row_mask]
return X_left, y_left, X_right, y_right
def run_hierarchy(self, X, y, param_grid, models, save_models):
"""
Makes predictions through the whole classification hierarchy.
Args:
X: features
y: labels (Trial Type Num Reaches Which Hand)
param_grid: grid
models: (list) list of trained models or None
save_models: (bool) True to save
Returns:
"""
# load models
# model_0, model_1, model_2 = None, None, None
# if models:
# model_0 = joblib.load(models[0])
# model_1 = joblib.load(models[1])
# model_2 = joblib.load(models[2])
# TRIAL TYPE
classifier = ReachClassifier()
y_0 = y['Trial Type'].values # 0 for not null
y_0 = CU.onehot_nulls(y_0)
model_0, val_score_0 = self.fit(classifier, X, y_0, param_grid, save_models,
f'{folder_name}/TrialTypeModel')
# SPLIT
# X_null, y_null, X_NotNull, y_NotNull = self.split(preds_0, X, y, onesGoLeft=True) # 1 if null, 0 if real trial
# NUM REACHES
y_1 = y['Num Reaches'].values
y_1 = CU.onehot_num_reaches(y_1) # 0 if <1, 1 if > 1 reaches
classifier = ReachClassifier()
model_1, val_score_1 = self.fit(classifier, X, y_1, param_grid, save_models,
f'{folder_name}/NumReachesModel')
# SPLIT
# X_greater, y_greater, X_less, y_less = self.split(preds_1, X_NotNull, y_NotNull, onesGoLeft=True) # 0 if <1, 1 if > 1 reaches
# WHICH HAND
classifier = ReachClassifier()
y_2 = y['Which Hand'].values # # classify 0 as r/l
y_2 = CU.hand_type_onehot(y_2)
model_2, val_score_2 = self.fit(classifier, X, y_2, param_grid, save_models,
f'{folder_name}/WhichHandModel')
# X_bi, y_bi, X_rl, y_rl = self.split(preds_2, X_less, y_less, onesGoLeft=True) # classify 0 as r/l, 1 or non r/l
return [val_score_0, val_score_1, val_score_2]
def fit(self, classifier, X, y, param_grid, save, filename):
"""
Trains, validates, and/or makes predictions.
Args:
classifier: ReachClassifier object
X: features
y: labels
param_grid: grid
save: (bool) True to save
filename: (str) file name to save model as
        Returns: best model, validation score
"""
# adjust class imbalance, feature selection
X_selected, y_res, fs = classifier.pre_classify(X, y)
# train and validate
assert (y is not None)
best_model, val_score = classifier.train_and_validate(X_selected, y_res, param_grid, save=save,
filename=filename)
return best_model, val_score
def run_hierarchy_pretrained(self, X, y, models):
"""
Makes predictions through the whole classification hierarchy.
Args:
X: features
y: labels (Trial Type Num Reaches Which Hand)
models: (list of str) list of trained models
Returns: list of validation accuracies
"""
# load models
model_0 = joblib.load(models[0])
model_1 = joblib.load(models[1])
model_2 = joblib.load(models[2])
# TRIAL TYPE
classifier = ReachClassifier()
y_0 = y['Trial Type'].values # 0 for not null
y_0 = CU.onehot_nulls(y_0)
val_score_0 = self.predict(X, y_0, model_0)
# SPLIT
# X_null, y_null, X_NotNull, y_NotNull = self.split(preds_0, X, y, onesGoLeft=True) # 1 if null, 0 if real trial
# NUM REACHES
y_1 = y['Num Reaches'].values
y_1 = CU.onehot_num_reaches(y_1) # 0 if <1, 1 if > 1 reaches
classifier = ReachClassifier()
val_score_1 = self.predict(X, y_1, model_1)
# SPLIT
# X_greater, y_greater, X_less, y_less = self.split(preds_1, X_NotNull, y_NotNull, onesGoLeft=True) # 0 if <1, 1 if > 1 reaches
# WHICH HAND
classifier = ReachClassifier()
y_2 = y['Which Hand'].values # # classify 0 as r/l
y_2 = CU.hand_type_onehot(y_2)
val_score_2 = self.predict(X, y_2, model_2)
# X_bi, y_bi, X_rl, y_rl = self.split(preds_2, X_less, y_less, onesGoLeft=True) # classify 0 as r/l, 1 or non r/l
return [val_score_0, val_score_1, val_score_2]
def predict(self, X, y, model):
# let
k = 5
X_selected, fs = ReachClassifier.do_feature_selection(X, y, k)
_, val_score = ReachClassifier.evaluate(model, X_selected, y)
return val_score
def trace_datapoint(self, X, arr=[]):
""" Q3.2
for a data point from the spam dataset, prints splits and thresholds
as it is classified down the tree.
"""
pass
class MakeFeatures:
# Operates on a single trial.
pos_names = ['Handle', 'Back Handle', 'Nose',
'Left Shoulder', 'Left Forearm', 'Left Wrist', 'Left Palm', 'Left Index Base', 'Left Index Tip',
'Left Middle Base', 'Left Middle Tip', 'Left Third Base',
'Left Third Tip', 'Left Fourth Finger Base', 'Left Fourth Finger Tip',
'Right Shoulder', 'Right Forearm', 'Right Wrist', 'Right Palm', 'Right Index Base',
'Right Index Tip', 'Right Middle Base', 'Right Middle Tip', 'Right Third Base',
'Right Third Tip', 'Right Fourth Finger Base', 'Right Fourth Finger Tip']
def __init__(self, trial_arr):
# partition coords and probabilities
self.num_bodyparts = 27
self.num_coords = 3
self.split_index = self.num_bodyparts * self.num_coords # 27 bodyparts * 3 XYZ coordinates for each = 81
self.coords = trial_arr[:self.split_index] # all XYZ coords of all bodyparts (81 rows of first half of array)
self.prob = trial_arr[self.split_index:] # all probability columns (81 rows of second half of array)
# display(coords, prob)
def calc_position(self):
# calculate position of each bodypart (x+y+z/3)
positions = [] # 2D array with rows are bodyparts, cols are frame nums
for i in np.arange(0, len(self.coords), self.num_coords): # for every bodypart
X = self.coords[i]
Y = self.coords[i + 1]
Z = self.coords[i + 2]
pos = (X + Y + Z) / self.num_coords # 1D array
positions.append(pos)
assert (len(positions) == self.num_bodyparts)
return positions
def calc_velocity_speed(self, time):
"""
Time is sliced from exp block 'time' column
"""
        # calculate velocity for each XYZ bodypart coordinate: (x1 - x0) / (t1 - t0)
velocities = [] # 2D array with rows are XYZ bodyparts, cols are frame nums
for i in np.arange(0, self.split_index, self.num_coords): # for every bodypart
X = self.coords[i]
Y = self.coords[i + 1]
Z = self.coords[i + 2]
for arr in [X, Y, Z]:
vel = []
for j in np.arange(len(arr) - 1):
x_0 = arr[j]
x_1 = arr[j + 1]
t_0 = time[j]
t_1 = time[j + 1]
                    vel.append((x_1 - x_0) / (t_1 - t_0))
velocities.append(vel)
assert (len(velocities) == self.split_index)
        # aggregate a squared-speed statistic per bodypart: sum of squared XYZ velocities, averaged over the 3 axes
        speeds = []  # 1D list, one scalar per bodypart
for i in np.arange(0, self.split_index, self.num_coords):
x_vel = velocities[i]
y_vel = velocities[i + 1]
z_vel = velocities[i + 2]
x_squared = np.dot(x_vel, x_vel)
y_squared = np.dot(y_vel, y_vel)
z_squared = np.dot(z_vel, z_vel)
speed = (x_squared + y_squared + z_squared) / 3 # int
speeds.append(speed)
assert (len(speeds) == self.num_bodyparts)
return velocities, speeds
@staticmethod
def calc_all(trial_arr, time):
# Calculate
f = MakeFeatures(trial_arr)
positions = f.calc_position()
velocities, speeds = f.calc_velocity_speed(time)
# take mean & median of each bodypart for 2D arrays
mean_vel = np.mean(velocities, axis=1) # len = 81
median_vel = np.median(velocities, axis=1)
mean_pos = np.mean(positions, axis=1) # len = 27
median_pos = np.median(positions, axis=1)
# Create df
# concat all arrays
speeds.extend(mean_pos)
speeds.extend(median_pos)
speeds.extend(mean_vel)
speeds.extend(median_vel)
# create col names
col_names = [bodypart + ' speed' for bodypart in f.pos_names]
col_names.extend([bodypart + ' mean pos' for bodypart in f.pos_names])
col_names.extend([bodypart + ' median pos' for bodypart in f.pos_names])
xzy_pos_names = [bodypart + ' X' for bodypart in f.pos_names] + [bodypart + ' Y' for bodypart in
f.pos_names] + [bodypart + ' Z' for bodypart in
f.pos_names]
col_names.extend([bodypart + ' mean vel' for bodypart in xzy_pos_names])
col_names.extend([bodypart + ' median vel' for bodypart in xzy_pos_names])
# create df
df = pd.DataFrame([speeds], columns=col_names)
return df
@staticmethod
def make_block_features(trials, times):
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
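As a hedged illustration of the preprocessing path used by ReachClassifier above (SMOTE oversampling, SelectKBest feature selection, then a random forest), here is a self-contained sketch on synthetic data; the generated dataset merely stands in for the real trial features:

from imblearn.over_sampling import SMOTE
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=400, n_features=20, weights=[0.8, 0.2], random_state=0)
X_res, y_res = SMOTE(random_state=42).fit_resample(X, y)                      # balance the classes
X_sel = SelectKBest(score_func=f_classif, k=10).fit_transform(X_res, y_res)   # keep the 10 strongest features
print(cross_val_score(RandomForestClassifier(), X_sel, y_res, cv=5).mean())   # 5-fold CV accuracy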
#%%
import logging
logging.basicConfig(filename='covi19_dashboarder.log',
level=logging.ERROR,
format='%(asctime)s %(message)s')
logger = logging.getLogger("covi19_dashboarder")
class Preprocessor():
def __init__(self):
from pathlib import Path
self.current_data_confirmed_ = None
self.current_data_deaths_ = None
self.final_data_path_ = 'https://raw.githubusercontent.com/GermanCM/Covid19_data_analyzer/master/data/covid19_ts_data.csv'
def change_date_format(self, x):
try:
date_elements = x.split('/')
year = '20'+date_elements[2]
            day = date_elements[1].zfill(2)
            month = date_elements[0].zfill(2)
return year+'-'+month+'-'+day
except Exception as exc:
return exc
def get_current_data(self, ts_all_data_columns):
try:
from datetime import datetime, timedelta
from tqdm import tqdm
import pandas as pd
import numpy as np
####################TIME SERIES FILES
DATA_PATH_CONFIRMED = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
DATA_PATH_DEATHS = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
#DATA_PATH_RECOVERED = 'https://raw.githubusercontent.com/GermanCM/COVID-19/my_updated_master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
self.current_data_confirmed_ = pd.read_csv(filepath_or_buffer=DATA_PATH_CONFIRMED, sep=',')
self.current_data_deaths_ = pd.read_csv(filepath_or_buffer=DATA_PATH_DEATHS, sep=',')
#current_data_recovered = pd.read_csv(filepath_or_buffer=DATA_PATH_RECOVERED, sep=',')
ts_all_data = pd.DataFrame(columns=ts_all_data_columns)
time_columns = self.current_data_confirmed_.columns[4:]
date_new_names = pd.Series(time_columns).apply(self.change_date_format).values
today_date = datetime.today().date()
current_data = pd.read_csv(self.final_data_path_)
current_data.rename(columns={'Unnamed: 0': 'Date'}, inplace=True)
current_data.set_index('Date', inplace=True)
last_date_in_data = current_data.index[-1]
# convert string last_date to datetime
last_date_array = last_date_in_data.split('-')
last_date_year = int(last_date_array[0])
last_date_month = int(last_date_array[1])
last_date_day = int(last_date_array[2])
last_date_in_data=datetime(year=last_date_year, month=last_date_month, day=last_date_day).date()
if last_date_in_data < (today_date+timedelta(days=-1)):
country_with_colonies=list([])
country_with_provinces=list([])
unique_countries = self.current_data_confirmed_['Country/Region'].unique()
countries_regions = self.current_data_confirmed_['Country/Region']
while last_date_in_data < (today_date+timedelta(days=-1)):
for country in unique_countries:
if len(self.current_data_confirmed_[[country in x for x in countries_regions]])>1:
country_provinces = np.array(self.current_data_confirmed_[[country in x for x in countries_regions]]['Province/State'].values)
if pd.isnull(country_provinces).any():
country_with_colonies.append(country)
else:
country_with_provinces.append(country)
rest_of_countries = [x for x in unique_countries if ((x not in country_with_colonies)&(x not in country_with_provinces))]
#country_with_colonies
for country in tqdm(rest_of_countries):
# CONFIRMED INFECTIONS DATA
this_country_CONFIRMED_data_mask=self.current_data_confirmed_['Country/Region']==country
this_country_CONFIRMED_data = self.current_data_confirmed_[this_country_CONFIRMED_data_mask]
                        # for each country (i.e., each numeric column corresponding to a country/region)
this_country_ts_CONFIRMED_values = this_country_CONFIRMED_data[this_country_CONFIRMED_data.columns[4:]].T
this_country_ts_CONFIRMED_values.index = date_new_names
# CONFIRMED DEATHS DATA
this_country_DEATHS_data_mask=self.current_data_deaths_['Country/Region']==country
this_country_DEATHS_data = self.current_data_deaths_[this_country_DEATHS_data_mask]
                        # for each country (i.e., each numeric column corresponding to a country/region)
this_country_ts_DEATHS_values = this_country_DEATHS_data[this_country_DEATHS_data.columns[4:]].T
this_country_ts_DEATHS_values.index = date_new_names
ts_country_data=pd.DataFrame(index=date_new_names)
ts_country_data['Country']=country
#country_mask = self.current_data_confirmed_['Country/Region']==country_name
ts_country_data['Latitude']=this_country_CONFIRMED_data['Lat'].iloc[0]
ts_country_data['Longitude']=this_country_CONFIRMED_data['Long'].iloc[0]
ts_country_data['Confirmed']=this_country_ts_CONFIRMED_values
ts_country_data['Deaths']=this_country_ts_DEATHS_values
#ts_country_data['Recovered']=ts_recovered_values[countries_dict[country]]
ts_all_data = ts_all_data.append(ts_country_data)
#%%
from tqdm import tqdm
#country_with_colonies
for country in tqdm(country_with_colonies):
# we only get country data whose province/region is null
# CONFIRMED INFECTIONS DATA
this_country_with_colonies_CONFIRMED_data_mask=self.current_data_confirmed_['Country/Region']==country
this_country_with_colonies_CONFIRMED_data = self.current_data_confirmed_[this_country_with_colonies_CONFIRMED_data_mask]
main_country_CONFIRMED_data_mask=pd.isna(this_country_with_colonies_CONFIRMED_data['Province/State'])
this_country_CONFIRMED_data=this_country_with_colonies_CONFIRMED_data[main_country_CONFIRMED_data_mask]
                        # for each country (i.e., each numeric column corresponding to a country/region)
this_country_ts_CONFIRMED_values = this_country_CONFIRMED_data[this_country_CONFIRMED_data.columns[4:]].T
this_country_ts_CONFIRMED_values.index = date_new_names
# CONFIRMED DEATHS DATA
this_country_with_colonies_DEATHS_data_mask=self.current_data_deaths_['Country/Region']==country
this_country_with_colonies_DEATHS_data = self.current_data_deaths_[this_country_with_colonies_DEATHS_data_mask]
main_country_DEATHS_data_mask=pd.isna(this_country_with_colonies_DEATHS_data['Province/State'])
this_country_DEATHS_data=this_country_with_colonies_DEATHS_data[main_country_DEATHS_data_mask]
                        # for each country (i.e., each numeric column corresponding to a country/region)
this_country_ts_DEATHS_values = this_country_DEATHS_data[this_country_DEATHS_data.columns[4:]].T
this_country_ts_DEATHS_values.index = date_new_names
ts_country_data=pd.DataFrame(index=date_new_names)
ts_country_data['Country']=country
#country_mask = self.current_data_confirmed_['Country/Region']==country_name
ts_country_data['Latitude']=this_country_CONFIRMED_data['Lat'].iloc[0]
ts_country_data['Longitude']=this_country_CONFIRMED_data['Long'].iloc[0]
ts_country_data['Confirmed']=this_country_ts_CONFIRMED_values
ts_country_data['Deaths']=this_country_ts_DEATHS_values
#ts_country_data['Recovered']=ts_recovered_values[countries_dict[country]]
ts_all_data = ts_all_data.append(ts_country_data)
#%%
#country_with_provinces
for country in tqdm(country_with_provinces):
coordinates_dict = {'China': {'Latitude': 40.1824,'Longitude': 116.4142},
'Australia': {'Latitude': -35.4735, 'Longitude': 149.0124},
'Canada': {'Latitude': 49.2827, 'Longitude': -123.1207}}
# we only get country data whose province/region is null
# CONFIRMED INFECTIONS DATA
this_country_with_colonies_CONFIRMED_data_mask=self.current_data_confirmed_['Country/Region']==country
this_country_with_colonies_CONFIRMED_data = self.current_data_confirmed_[this_country_with_colonies_CONFIRMED_data_mask]
#main_country_CONFIRMED_data_mask=pd.isna(this_country_with_colonies_CONFIRMED_data['Province/State'])
this_country_CONFIRMED_data=this_country_with_colonies_CONFIRMED_data.groupby(by='Country/Region').sum()
                        # for each country (i.e., each numeric column corresponding to a country/region)
this_country_ts_CONFIRMED_values = this_country_CONFIRMED_data[this_country_CONFIRMED_data.columns[2:]].T
this_country_ts_CONFIRMED_values.index = date_new_names
# CONFIRMED DEATHS DATA
this_country_with_colonies_DEATHS_data_mask=self.current_data_deaths_['Country/Region']==country
this_country_with_colonies_DEATHS_data = self.current_data_deaths_[this_country_with_colonies_DEATHS_data_mask]
#main_country_DEATHS_data_mask=pd.isna(this_country_with_colonies_DEATHS_data['Province/State'])
this_country_DEATHS_data=this_country_with_colonies_DEATHS_data.groupby(by='Country/Region').sum()
                        # for each country (i.e., each numeric column corresponding to a country/region)
this_country_ts_DEATHS_values = this_country_DEATHS_data[this_country_DEATHS_data.columns[2:]].T
this_country_ts_DEATHS_values.index = date_new_names
ts_country_data=
|
pd.DataFrame(index=date_new_names)
|
pandas.DataFrame
|
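The string surgery in change_date_format above can also be expressed with pandas' own datetime parsing; a small hedged sketch, assuming the JHU-style 'M/D/YY' column labels:

import pandas as pd

raw_dates = pd.Series(['1/22/20', '10/23/20'])                # hypothetical column labels
iso_dates = pd.to_datetime(raw_dates, format='%m/%d/%y').dt.strftime('%Y-%m-%d')
print(iso_dates.tolist())                                     # ['2020-01-22', '2020-10-23']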
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import shuffle
from rbml import RBML
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, KFold
from sklearn.datasets import load_iris, load_wine
from metric_learn import LMNN
import pandas as pd
import utils
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='all',
help='Dataset name')
parser.add_argument('--a', type=float, default=0.2,
help='alpha RBML')
parser.add_argument('--b', type=float, default=2,
help='beta RBML')
parser.add_argument('--k_neighbors', type=int, default=3,
help='k_neighbors RBML')
parser.add_argument('--iteration', type=int, default=4,
help='iteration RBML')
return parser.parse_args()
def zscore_normalization(dataset_x, dataset_y):
scaler = StandardScaler()
print(f'{dataset_name} dataset z-score normalization...')
return scaler.fit_transform(dataset_x), dataset_y
class Pipeline:
def __init__(self, dataset_name, a=0.5, b=2, k_neighbors=3, use_lmnn=False):
self.scaler_lmnn = StandardScaler()
self.scaler_rbml = StandardScaler()
self.scaler_rf = StandardScaler()
self.knn = KNeighborsClassifier(n_neighbors=k_neighbors)
self.knn_lmnn = KNeighborsClassifier(n_neighbors=k_neighbors)
self.knn_raw = KNeighborsClassifier(n_neighbors=k_neighbors)
self.lmnn = LMNN(k=k_neighbors, verbose=False)
self.rbml = RBML(a=a, b=b, k_neighbors=k_neighbors, dataset=dataset_name)
self.random_forest = None
self.use_lmnn = use_lmnn
def fit(self, train_x, train_y, iteration=4):
self.knn_raw.fit(train_x, train_y)
self.lmnn.fit(train_x, train_y)
lmnn_projected = self.lmnn.transform(train_x)
self.knn_lmnn.fit(lmnn_projected, train_y)
lmnn_projected = self.scaler_lmnn.fit_transform(lmnn_projected)
if not self.use_lmnn:
rbml_projected = self.rbml.fit_transform(train_x, train_y)
else:
rbml_projected = self.rbml.fit_transform(x=lmnn_projected, y=train_y, iteration=iteration)
#print(' --> '.join([str(np.round(m, 2)) for m in self.rbml.avg_margins]))
rbml_projected = self.scaler_rbml.fit_transform(rbml_projected)
self.random_forest = RandomForestRegressor(n_estimators=train_x.shape[1])
if not self.use_lmnn:
self.random_forest.fit(train_x, rbml_projected)
rf_projected = self.random_forest.predict(train_x)
else:
self.random_forest.fit(lmnn_projected, rbml_projected)
rf_projected = self.random_forest.predict(lmnn_projected)
rf_projected = self.scaler_rf.fit_transform(rf_projected)
self.knn.fit(rf_projected, train_y)
def transform(self, test_x):
if not self.use_lmnn:
rf_projected = self.random_forest.predict(test_x)
else:
lmnn_projected = self.lmnn.transform(test_x)
lmnn_projected = self.scaler_lmnn.transform(lmnn_projected)
rf_projected = self.random_forest.predict(lmnn_projected)
rf_projected = self.scaler_rf.transform(rf_projected)
return self.knn.predict(rf_projected)
def score(self, test_x, test_y):
result = self.transform(test_x)
return accuracy_score(test_y, result)
def lmnn_score(self, test_x, test_y):
lmnn_projected = self.lmnn.transform(test_x)
result = self.knn_lmnn.predict(lmnn_projected)
return accuracy_score(test_y, result)
def raw_score(self, test_x, test_y):
result = self.knn_raw.predict(test_x)
return accuracy_score(test_y, result)
def process_pipeline_1(dataset_x, dataset_y, dataset_name, a=0.5, b=2, k_neighbors=3, iteration=4):
dataset_x, dataset_y = zscore_normalization(dataset_x, dataset_y)
kfold = KFold(n_splits=len(dataset_x), shuffle=True, random_state=42)
#kfold = KFold(n_splits=100, shuffle=True, random_state=42)
fold = kfold.split(dataset_x, dataset_y)
raw_scores = []
lmnn_scores = []
rbml_scores = []
pipeline = None
for train, test in fold:
pipeline = Pipeline(dataset_name, a=a, b=b, k_neighbors=k_neighbors)
pipeline.fit(dataset_x[train], dataset_y[train], iteration=iteration)
raw_scores.append(pipeline.raw_score(dataset_x[test], dataset_y[test]))
lmnn_scores.append(pipeline.lmnn_score(dataset_x[test], dataset_y[test]))
rbml_scores.append(pipeline.score(dataset_x[test], dataset_y[test]))
avg_margins = pipeline.rbml.avg_margins
utils.plot_mean_mi(avg_margins, save_path=f'{dataset_name}_mean_mi.png',
title=f'{dataset_name} Dataset Average Margins')
utils.plot_dataset(dataset_x, dataset_y, save_path=f'{dataset_name}_raw_dataset.png',
title=f'{dataset_name} Raw Dataset')
utils.plot_dataset(pipeline.rbml.x, pipeline.rbml.y, save_path=f'{dataset_name}_rbml_trained.png',
title=f'{dataset_name} Dataset RBML Projected')
utils.plot_dataset(pipeline.random_forest.predict(dataset_x), dataset_y,
save_path=f'{dataset_name}_rf_projected_target.png',
title=f'{dataset_name} Dataset RF Projected Target')
print(f'Euclidean Accuracy: {np.mean(raw_scores):.3f}')
print(f'LMNN Accuracy: {np.mean(lmnn_scores):.3f}')
print(f'RBML Accuracy: {np.mean(rbml_scores):.3f}')
def process_pipeline_2(dataset_x, dataset_y, dataset_name, a=0.5, b=2, k_neighbors=3, iteration=4):
"""For the Vowel, Balance and Pima datasets, 250 samples were randomly selected as a training set and the rest were used to define the test set.
Hence, 278, 375, and 518 test samples were available for each dataset, respectively.
This process was repeated 10 times independently.
For each dataset and each method, the average accuracy and the corresponding standard deviation values were computed."""
dataset_x, dataset_y = zscore_normalization(dataset_x, dataset_y)
raw_scores = []
lmnn_scores = []
rbml_scores = []
pipeline = None
for p in range(10):
# shuffle dataset
shuffle_index = np.random.permutation(len(dataset_x))
dataset_x, dataset_y = dataset_x[shuffle_index], dataset_y[shuffle_index]
# split dataset
train_x, test_x, train_y, test_y = dataset_x[:250], dataset_x[250:], dataset_y[:250], dataset_y[250:]
pipeline = Pipeline(dataset_name, a=a, b=b, k_neighbors=k_neighbors)
pipeline.fit(train_x, train_y, iteration=iteration)
raw_scores.append(pipeline.raw_score(test_x, test_y))
lmnn_scores.append(pipeline.lmnn_score(test_x, test_y))
rbml_scores.append(pipeline.score(test_x, test_y))
avg_margins = pipeline.rbml.avg_margins
utils.plot_mean_mi(avg_margins, save_path=f'{dataset_name}_mean_mi.png',
title=f'{dataset_name} Dataset Average Margins')
utils.plot_dataset(dataset_x, dataset_y, save_path=f'{dataset_name}_raw_dataset.png',
title=f'{dataset_name} Raw Dataset')
utils.plot_dataset(pipeline.rbml.x, pipeline.rbml.y, save_path=f'{dataset_name}_rbml_trained.png',
title=f'{dataset_name} Dataset RBML Projected')
utils.plot_dataset(pipeline.random_forest.predict(dataset_x), dataset_y,
save_path=f'{dataset_name}_rf_projected_target.png',
title=f'{dataset_name} Dataset RF Projected Target')
print(f'Euclidean Accuracy: {np.mean(raw_scores):.3f}, ({np.std(raw_scores):.3f})')
print(f'LMNN Accuracy: {np.mean(lmnn_scores):.3f}, ({np.std(lmnn_scores):.3f})')
print(f'RBML Accuracy: {np.mean(rbml_scores):.3f}, ({np.std(rbml_scores):.3f})')
def process_pipeline_3(dataset_x, dataset_y, dataset_name, a=0.5, b=2, k_neighbors=3, iteration=4):
dataset_x, dataset_y = zscore_normalization(dataset_x, dataset_y)
kfold = KFold(n_splits=10, shuffle=True, random_state=42)
fold = kfold.split(dataset_x, dataset_y)
raw_scores = []
lmnn_scores = []
rbml_scores = []
pipeline = None
for train, test in fold:
pipeline = Pipeline(dataset_name, a=a, b=b, k_neighbors=k_neighbors)
pipeline.fit(dataset_x[train], dataset_y[train], iteration=iteration)
raw_scores.append(pipeline.raw_score(dataset_x[test], dataset_y[test]))
lmnn_scores.append(pipeline.lmnn_score(dataset_x[test], dataset_y[test]))
rbml_scores.append(pipeline.score(dataset_x[test], dataset_y[test]))
avg_margins = pipeline.rbml.avg_margins
utils.plot_mean_mi(avg_margins, save_path=f'{dataset_name}_mean_mi.png',
title=f'{dataset_name} Dataset Average Margins')
utils.plot_dataset(dataset_x, dataset_y, save_path=f'{dataset_name}_raw_dataset.png',
title=f'{dataset_name} Raw Dataset')
utils.plot_dataset(pipeline.rbml.x, pipeline.rbml.y, save_path=f'{dataset_name}_rbml_trained.png',
title=f'{dataset_name} Dataset RBML Projected')
utils.plot_dataset(pipeline.random_forest.predict(dataset_x), dataset_y,
save_path=f'{dataset_name}_rf_projected_target.png',
title=f'{dataset_name} Dataset RF Projected Target')
print(f'Euclidean Accuracy: {np.mean(raw_scores):.3f}, ({np.std(raw_scores):.3f})')
print(f'LMNN Accuracy: {np.mean(lmnn_scores):.3f}, ({np.std(lmnn_scores):.3f})')
print(f'RBML Accuracy: {np.mean(rbml_scores):.3f}, ({np.std(rbml_scores):.3f})')
if __name__ == '__main__':
args = parse_args()
#datase_names = ['iris', 'wine', 'sonar']
#dataset_names = ['vowel', 'balance', 'pima']
#dataset_names = ['segmentation', 'letters']
#dataset_names = ['sonar', 'balance']
if args.dataset == 'all':
dataset_names = reversed(['iris', 'wine', 'sonar','vowel', 'balance', 'pima', 'segmentation', 'letters'])
else:
dataset_names = [args.dataset]
alpha, beta, k_neighbors, iteration = args.a, args.b, args.k_neighbors, args.iteration
for dataset_name in dataset_names:
print(f'Processing {dataset_name} dataset...')
if dataset_name == 'iris':
iris = load_iris()
dataset_x, dataset_y = iris.data, iris.target
#alpha, beta, k_neighbors, iteration = 0.2, 2, 3, 5
process_pipeline_1(dataset_x, dataset_y, dataset_name, a=alpha, b=beta, k_neighbors=k_neighbors, iteration=iteration)
elif dataset_name == 'wine':
wine = load_wine()
dataset_x, dataset_y = wine.data, wine.target
#alpha, beta, k_neighbors, iteration = 0.2, 2, 3, 5
process_pipeline_1(dataset_x, dataset_y, dataset_name, a=alpha, b=beta, k_neighbors=k_neighbors, iteration=iteration)
elif dataset_name == 'sonar':
dataset = pd.read_csv('datasets/sonar/sonar.all-data', header=None)
array = dataset.values
dataset_x = array[:, :-1].astype(float)
dataset_y = array[:, -1]
dataset_y[dataset_y == 'R'] = 0
dataset_y[dataset_y == 'M'] = 1
dataset_y = dataset_y.astype(int)
#alpha, beta, k_neighbors, iteration = 0.2, 2, 3, 4
process_pipeline_1(dataset_x, dataset_y, dataset_name, a=alpha, b=beta, k_neighbors=k_neighbors, iteration=iteration)
elif dataset_name == 'vowel':
#dataset = pd.read_csv("datasets/vowel/vowel-context.data", delimiter="\s+", header=None)
dataset = pd.read_csv("datasets/vowel/vowel.tr-orig-order", header=None)
array = dataset.values
dataset_x = array[:, :-1].astype(float)
dataset_y = array[:, -1]
#alpha, beta, k_neighbors, iteration = 0.2, 2, 3, 4
process_pipeline_2(dataset_x, dataset_y, dataset_name, a=alpha, b=beta, k_neighbors=k_neighbors, iteration=iteration)
elif dataset_name == 'balance':
dataset = pd.read_csv("datasets/balance_scale/balance-scale.data", delimiter=",", header=None)
array = dataset.values
dataset_x = array[:, 1:].astype(float)
dataset_y = array[:, 0]
dataset_y[dataset_y == 'B'] = 0
dataset_y[dataset_y == 'R'] = 1
dataset_y[dataset_y == 'L'] = 2
dataset_y = dataset_y.astype(int)
#alpha, beta, k_neighbors, iteration = 0.2, 2, 3, 4
process_pipeline_2(dataset_x, dataset_y, dataset_name, a=alpha, b=beta, k_neighbors=k_neighbors, iteration=iteration)
elif dataset_name in ['pima', 'diabetes']:
pima = pd.read_csv("datasets/pima-indians-diabetes.csv", delimiter=",", header=None)
dataset_x, dataset_y = pima.iloc[:, :-1].values, pima.iloc[:, -1].values
#alpha, beta, k_neighbors, iteration = 0.2, 2, 3, 4
process_pipeline_2(dataset_x, dataset_y, dataset_name, a=alpha, b=beta, k_neighbors=k_neighbors, iteration=iteration)
elif dataset_name == 'segmentation':
dataset1 =
|
pd.read_csv("datasets/image_segmentation/segmentation.data", delimiter=",", header=None)
|
pandas.read_csv
|
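process_pipeline_1 above evaluates with KFold(n_splits=len(dataset_x)), which amounts to leave-one-out cross-validation. A hedged sketch of the equivalent Euclidean (raw KNN) baseline on iris, using scikit-learn's explicit LeaveOneOut splitter:

from sklearn.datasets import load_iris
from sklearn.model_selection import LeaveOneOut, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)
X = StandardScaler().fit_transform(X)                         # z-score normalisation, as in the pipeline
scores = cross_val_score(KNeighborsClassifier(n_neighbors=3), X, y, cv=LeaveOneOut())
print(f'LOO accuracy: {scores.mean():.3f}')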
import requests
import json
import pandas as pd
import config_LinkedIn as lc
import datetime
import exasol as e
import config_Exasol as ec
exaconnect = e.connect(
dsn=ec.dsn,
DRIVER=ec.DRIVER,
EXAHOST=ec.EXAHOST,
EXAUID=ec.EXAUID,
EXAPWD=ec.EXAPWD,
autocommit=True
)
apikey = lc.apitoken
cu_online_id = lc.cu_online_id
exasol_db = 'CU_ONLINE_MARKETING_STG.LINKEDIN_CAMPAIGNS_UPDATED'
def get_linkedin_dataframe(passed_url):
get_active_url = requests.get(passed_url)
print(get_active_url)
json_details = json.loads(get_active_url.text)['elements']
df = pd.io.json.json_normalize(json_details)
return df
def linkedin_converttime(dataframe, field):
dataframe[field] = pd.to_numeric(dataframe[field])
dataframe[field] =
|
pd.to_datetime(dataframe[field], unit='ms')
|
pandas.to_datetime
|
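linkedin_converttime above converts epoch-millisecond fields to datetimes; a minimal hedged sketch with a made-up field name and values:

import pandas as pd

df = pd.DataFrame({'created_time': [1609459200000, 1612137600000]})   # hypothetical epoch-ms values
df['created_time'] = pd.to_datetime(pd.to_numeric(df['created_time']), unit='ms')
print(df['created_time'].tolist())   # [Timestamp('2021-01-01 00:00:00'), Timestamp('2021-02-01 00:00:00')]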
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 25 12:14:08 2019
@author: jai
"""
import cv2
import math
import h5py
import numpy as np
import pandas as pd
from PIL import Image
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
#Extracting data
f = h5py.File('1.mat')
f['cjdata']['PID'].value
f['cjdata']['label'].value
tumorImage = f['cjdata']['image'].value
tumorBorder = f['cjdata']['tumorBorder']
tumorMask = f['cjdata']['tumorMask']
plt.imshow(tumorMask)
plt.imshow(tumorImage)
#Tumor Highlight
f = h5py.File('600.mat')
tumorImage = f['cjdata']['image'].value
tumorBorder = f['cjdata']['tumorBorder']
tumorMask = f['cjdata']['tumorMask']
plt.imshow(tumorImage, cmap='gray')
plt.imshow(tumorMask, cmap='jet', alpha=0.1)
#Cropping Tumor
array = f['cjdata']['tumorBorder'].value
x = array[0][::2]
y = array[0][1::2]
plt.plot(y, x)
x_max = math.ceil(max(x))
x_min = math.floor(min(x))
y_max = math.ceil(max(y))
y_min = math.floor(min(y))
crop_rect_x = [x_min, x_min, x_max, x_max, x_min]
crop_rect_y = [y_min, y_max, y_max, y_min, y_min]
plt.plot(crop_rect_y, crop_rect_x)
width = x_max - x_min
height = y_max - y_min
# Testing Data
if width > 300:
excess_width = width - 300
if excess_width % 2 == 0:
x_max = x_max - excess_width / 2
x_min = x_min - excess_width / 2
crop = tumorImage[x_min: x_max, y_min: y_max]
plt.imshow(crop, cmap='gray')
plt.imshow(tumorImage)
#crop_scaled = tumorImage[(x_min-10): (x_max+10), (y_min-10): (y_max+10)]
#plt.imshow(crop_scaled, cmap='gray')
array = (np.random.rand(100, 200)*256).astype(np.uint8)
img = Image.fromarray(array).convert('L')  # use the random test array defined above
plt.imshow(img)
plt.imshow(tumorImage)
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
gray = rgb2gray(array)
cv2.imshow('hey', img)
data = tumorImage
data = data.astype(np.uint8)
data = data / 255
plt.imshow(data)
# Finding range and mean of cropped tumor shape
sum_0 = 0
sum_1 = 0
max_0 = 0
max_1 = 0
min_0 = 1000
min_1 = 1000
range_min = 0
range_max = 0
for image in croppedTumorImages:
sum_0 += image.shape[0]
sum_1 += image.shape[1]
if image.shape[0] > max_0:
max_0 = image.shape[0]
if image.shape[1] > max_1:
max_1 = image.shape[1]
if image.shape[0] < min_0:
min_0 = image.shape[0]
if image.shape[1] < min_1:
min_1 = image.shape[1]
if image.max() > range_max:
range_max = image.max()
if image.min() < range_min:
range_min = image.min()
print(sum_0/3064, sum_1/3064)
plt.imshow(croppedTumorImages[20])
tumorImage = tumorImage.astype('uint8')
pic = cv2.cvtColor(array, cv2.COLOR_BGR2GRAY)
info = np.iinfo(croppedTumorImages[20].dtype) # Get the information of the incoming image type
data = croppedTumorImages[20].astype(np.float64) / info.max # normalize the data to 0 - 1
data = 255 * data # Now scale by 255
img = data.astype(np.uint8)
plt.imshow(img)
# Scaling Images
res = cv2.resize(img, dsize=(75, 75), interpolation=cv2.INTER_CUBIC)
plt.imshow(res)
# Creating dataset
PIDs = [0] * 3064
labels = [0] * 3064
tumorImages = [0] * 3064
tumorBorders = [0] * 3064
tumorMasks = [0] * 3064
croppedTumorImages = [0] * 3064
scaledTumorImages = [0] * 3064
for i in range(1, 3065):
f = h5py.File(str(i) + '.mat')
labels[i-1] = math.floor(f['cjdata']['label'].value[0][0])
PIDs[i-1] = f['cjdata']['PID'].value
tumorImages[i-1] = f['cjdata']['image'].value
tumorBorders[i-1] = f['cjdata']['tumorBorder']
tumorMasks[i-1] = f['cjdata']['tumorMask']
array = f['cjdata']['tumorBorder'].value
x = array[0][::2]
y = array[0][1::2]
x_max = math.ceil(max(x))
x_min = math.floor(min(x))
y_max = math.ceil(max(y))
y_min = math.floor(min(y))
tumorImage = f['cjdata']['image'].value
croppedTumorImages[i-1] = tumorImage[x_min: x_max, y_min: y_max]
scaledTumorImages[i-1] = cv2.resize(croppedTumorImages[i-1],
dsize=(75, 75), interpolation=cv2.INTER_CUBIC)
f.close()
columns = ['PID', 'label', 'tumorImage', 'tumorBorder', 'tumorMask',
           'croppedTumorImage', 'scaledTumorImage']
data = pd.DataFrame({'PID': PIDs, 'label': labels, 'tumorImage': tumorImages,
                     'tumorBorder': tumorBorders, 'tumorMask': tumorMasks,
                     'croppedTumorImage': croppedTumorImages, 'scaledTumorImage': scaledTumorImages})
# Export as CSV
data.to_csv('dataframe.csv', index=False)
# Import CSV
data =
|
pd.read_csv('dataframe.csv')
|
pandas.read_csv
|
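The cropping and rescaling loop above reduces each tumor to the bounding box of its border polygon and resizes it to 75x75. A hedged, self-contained sketch of that step with a synthetic image and a made-up border array:

import math
import cv2
import numpy as np

border = np.array([[10.2, 30.5, 25.7, 60.1, 40.3, 35.9]])    # interleaved (row, col) coordinates
rows, cols = border[0][::2], border[0][1::2]
r0, r1 = math.floor(rows.min()), math.ceil(rows.max())
c0, c1 = math.floor(cols.min()), math.ceil(cols.max())
image = np.random.rand(512, 512).astype(np.float32)           # stands in for cjdata/image
crop = image[r0:r1, c0:c1]
scaled = cv2.resize(crop, dsize=(75, 75), interpolation=cv2.INTER_CUBIC)
print(scaled.shape)                                            # (75, 75)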
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
assert isinstance(move_df.to_grid(8), Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
|
assert_frame_equal(new_move_df, expected)
|
pandas.testing.assert_frame_equal
|
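The tests above lean on pandas.testing.assert_frame_equal, which raises when two frames differ in values, dtypes or index. A tiny hedged example of the pattern:

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({'lat': [39.984094], 'lon': [116.319236]})
right = left.copy()
assert_frame_equal(left, right)   # passes; any value or dtype mismatch would raise AssertionError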
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary storage for the current page, merged into dfAll on page change
    dfAll = pd.DataFrame()  # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), print completed and then break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
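            # nudge the page down before reading the next card (the grid appears to lazy-load on scroll)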
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
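            # scroll one step so the next batch of lazy-loaded cards is rendered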
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.july2017.co/products?page=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Per():
shop_id = 32
name = 'per'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.perdot.com.tw/categories/all?page=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cereal():
shop_id = 33
name = 'cereal'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cerealoutfit.com/new/page/" + str(p) + "/"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
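        # dismiss the pop-up overlay (mfp-close button) if it shows up; ignore it otherwise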
try:
chrome.find_element_by_xpath(
"//button[@class='mfp-close']").click()
except:
pass
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/h3/a" % (i,)).text
if(title == ""):
i += 1
if(i == 25):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@data-loop='%i']" % (i,)).get_attribute('126-id')
pic_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//ins//bdi" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//del//bdi" % (i,)).text
ori_price = ori_price.rstrip(' NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[2]//span[@class='woocommerce-Price-amount amount']" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Jcjc():
shop_id = 35
name = 'jcjc'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.jcjc-dailywear.com/collections/in-stock?limit=24&page=" + \
str(p) + "&sort=featured"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a[1][@href]" % (i,)).get_attribute('href')
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/span/a/img" % (i,)).get_attribute('src')
page_id = pic_link[pic_link.find("i/")+2:pic_link.find(".j")]
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/s/span" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Ccshop():
shop_id = 36
name = 'ccshop'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.ccjshop.com/products?page=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Iris():
shop_id = 37
name = 'iris'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.irisgarden.com.tw/products?page=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='boxify-item product-item ng-isolate-scope'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nook():
shop_id = 39
name = 'nook'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.nooknook.me/products?page=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Greenpea():
shop_id = 40
name = 'greenpea'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.greenpea-tw.com/products?page=" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
                    i += 1
                    if(i == 25):
                        p += 1
                    continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Queen():
shop_id = 42
name = 'queen'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.queenshop.com.tw/zh-TW/QueenShop/ProductList?item1=01&item2=all&Page=" + \
str(p) + "&View=4"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "SaleID=")
pic_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/img[1]" % (i,)).get_attribute('data-src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cozyfee():
shop_id = 48
name = 'cozyfee'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cozyfee.com/product.php?page=" + \
str(p) + "&cid=55#prod_list"
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div[1]/a/img[1]" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[3]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Reishop():
shop_id = 49
name = 'reishop'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.reishop.com.tw/pdlist2.asp?item1=all&item2=&item3=&keyword=&ob=A&pagex=&pageno=" + \
str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 31):
try:
title = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("yano=YA")
page_id = page_id.replace("&color=", "")
pic_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span/img[1]" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 31):
p += 1
continue
i += 1
if(i == 31):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Yourz():
shop_id = 50
name = 'yourz'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.yourz.com.tw/product/category/34/1/" + str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 13):
try:
title = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/font" % (i,)).text
sale_price = sale_price.replace('VIP價:NT$ ', '')
sale_price = sale_price.rstrip('元')
ori_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/br" % (i,)).text
ori_price = ori_price.replace('NT$ ', '')
ori_price = ori_price.rstrip('元')
except:
i += 1
if(i == 13):
p += 1
continue
i += 1
if(i == 13):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Seoulmate():
shop_id = 54
name = 'seoulmate'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.seoulmate.com.tw/catalog.php?m=115&s=249&t=0&sort=&page=" + \
str(p)
        # if the page is past the last one (nothing found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 33):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/p[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul/li[%i]/p[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=115&s=249&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//ul/li[%i]/a/img[1]" % (i,)).get_attribute('src')
if(pic_link == ""):
i += 1
if(i == 33):
p += 1
continue
except:
i += 1
if(i == 33):
p += 1
continue
try:
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
                sale_price = sale_price.strip('NT.')
locate = sale_price.find("NT.")
sale_price = sale_price[locate+3:len(sale_price)]
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 33):
p += 1
continue
i += 1
if(i == 33):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sweesa():
shop_id = 55
name = 'sweesa'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary storage for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sweesa.com/Shop/itemList.aspx?&m=20&o=5&sa=1&smfp=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 45):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=20", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('TWD.')
ori_price = ""
except:
i += 1
if(i == 45):
p += 1
continue
i += 1
if(i == 45):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pazzo():
shop_id = 56
name = 'pazzo'
    options = Options() # start headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame()  # buffer for the current page; merged into dfAll on page change
dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pazzo.com.tw/recent?P=" + str(p)
# If the page number is out of range (the page is not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("c=")
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div[@class='item__images']/a/picture/img[@class='img-fluid']" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Meierq():
shop_id = 57
name = 'meierq'
options = Options()  # enable headless mode
options.add_argument('--headless')  # works around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
df = pd.DataFrame()  # buffer for the current page; merged into dfAll on page change
dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
page = 0
prefix_urls = [
"https://www.meierq.com/zh-tw/category/bottomclothing?P=",
"https://www.meierq.com/zh-tw/category/jewelry?P=",
"https://www.meierq.com/zh-tw/category/outerclothing?P=",
"https://www.meierq.com/zh-tw/category/accessories?P=",
]
for prefix in prefix_urls:
page += 1
for i in range(1, page_Max):
url = f"{prefix}{i}"
try:
print(url)
chrome.get(url)
chrome.find_element_by_xpath("//div[@class='items__image']")
except:
print("find_element_by_xpath_break", page)
if(page == 4):
chrome.quit()
print("break")
break
break
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div/p/a" % (i,)).text
except:
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div/p/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "n/")
page_id = page_id[:page_id.find("?c")]
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div/img" % (i,)).get_attribute('src')
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
continue
i += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Harper():
shop_id = 58
name = 'harper'
options = Options()  # enable headless mode
options.add_argument('--headless')  # works around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df =
completion: pd.DataFrame()
api: pandas.DataFrame
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Written by <NAME>
Utility functions to work with 3d_Rest Test
These functions create master lists of data directories, convert DICOM stacks to NIfTI, split the data, save it to HDF5 for easier loading, etc.
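Typical usage (sketch; assumes the path globals defined below point at real data):
    master_list, failed = create_master_list()   # or: master_list = load_master_list()
    merged = get_filenames(master_list, initial_exam=1)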
"""
import h5py
import os, glob, re
import nibabel as nib
import numpy as np
import tensorflow as tf
from sklearn.cross_validation import train_test_split
import skimage
import pandas as pd
from skimage.transform import resize
import commands
import math
import shutil
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
from keras.utils import multi_gpu_model
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.Session(config=config)
## image globals
im_size_y = 256
im_size_x = 256
im_size_z = 40
full_imsize = 256
## run globals
batch_size = 6
epochs = 500
exclude_label = 2
split = 0.80
valid_split = 0.20
nlabel = 2
channels = 1
data_order = 'tf' # 'th' for Theano, 'tf' for Tensorflow
model_type = 'resnet'
## path globals
log_path = "/media/mccoyd2/hamburger/hemorrhage_study/logs"
master_list_path = "/media/mccoyd2/hamburger/hemorrhage_study/subject_lists"
hdf5_path = "/media/mccoyd2/hamburger/hemorrhage_study/tensors"
data_paths = ["/media/mccoyd2/hamburger/hemorrhage_study/image_data","/media/mccoyd2/spaghetti/hemorrhage_study_overflow"]
NLP_path = "/media/mccoyd2/hamburger/hemorrhage_study/NLP"
model_path = "/media/mccoyd2/hamburger/hemorrhage_study/models"
## master path list global
list_subjs_master = pd.read_csv(master_list_path+"/master_subject_list.csv")
study = ['CT_BRAIN_WO_CONTRAST']
slice_thickness = ['2mm','2_mm','2_0mm']
direction = 'Axial'
organ = 'Brain'
## create master list by looping through relevant date directories, converting the DICOM stack to nifti and saving information to master DF
def create_master_list():
study_search = [x.lower() for x in study]
list_subjects = pd.DataFrame([])
failed_nifti_conv_subjects = []
r = re.compile(".*dcm")
for path in data_paths:
print(path)
for group in os.listdir(path):
if os.path.isdir(os.path.join(path, group)):
for batch in os.listdir(os.path.join(path, group)):
dicom_sorted_path = os.path.join(path, group, batch, 'DICOM-SORTED')
if os.path.isdir(dicom_sorted_path):
for subj in os.listdir(dicom_sorted_path):
mrn = subj.split('-')[0]
#print(mrn)
if os.path.isdir(os.path.join(dicom_sorted_path, subj)):
for proc_perf in os.listdir(os.path.join(dicom_sorted_path, subj)):
#print proc_perf
for input_proc in study_search:
if input_proc in proc_perf.lower():
#print input_proc
for proc in os.listdir(os.path.join(dicom_sorted_path, subj, proc_perf)):
if direction in proc:
#print direction
for slice in slice_thickness:
if re.findall(slice.lower(), proc.lower()):
if re.findall(organ, proc):
path_study = os.path.join(dicom_sorted_path, subj, proc_perf, proc)
print(path_study)
nii_in_path = False
ACN = proc_perf.split('-')[0]
datetime = re.findall(r"(\d{14})", proc)[0]
for fname in os.listdir(path_study):
if fname.endswith('.nii.gz'):
#os.remove(path_study+'/'+fname)
nifti_name = fname
nii_in_path = True
datetime = proc.split('-')[1]
datetime = datetime.split('_')[0]
list_subjects = list_subjects.append(pd.DataFrame({'Acn':[ACN], 'MRN': [mrn],'Patient_Path': [path_study+'/'+nifti_name], 'group': [group], 'Datetime': [datetime]}))
break
if not nii_in_path:
print(path_study)
ACN = proc_perf.split('-')[0]
print("Converting DICOMS for "+subj+" to NIFTI format")
os.chdir(path_study)
os.chdir('..')
status, output = commands.getstatusoutput('dcm2nii '+ proc)
if status != 0:
failed_nifti_conv_subjects.append(subj)
else:
index_nifti = [i for i, s in enumerate(output) if ">" in str(s)]
index_end = [i for i, s in enumerate(output[index_nifti[0]:]) if "\n" in str(s)]
nifti_name = output[index_nifti[0]+1:index_nifti[0]+index_end[0]]
list_subjects = list_subjects.append(pd.DataFrame({'Acn':[ACN],'MRN': [mrn],'Patient_Path': [path_study+'/'+nifti_name], 'group': [group], 'Datetime': [datetime]}))
master_list = pd.DataFrame(list_subjects)
failed_nifti_conv_subjects = pd.DataFrame(failed_nifti_conv_subjects)
master_list.to_csv(master_list_path+"/master_subject_list.csv")
failed_nifti_conv_subjects.to_csv(master_list_path+"/failed_nifti_converstions.csv")
return master_list, failed_nifti_conv_subjects
def load_master_list():
master_list = pd.read_csv(master_list_path+"/master_subject_list.csv")
return master_list
## split the master list into training, validation and test sets - also restrict the data to only initial exams if required
def get_filenames(master_list, initial_exam = 0):
list_subjs_master = master_list
if initial_exam == 1:
list_subjs_master['Datetime_Format'] = pd.to_datetime(list_subjs_master['Datetime'], format='%Y%m%d%H%M%S')
mrn_groups = list_subjs_master.groupby(list_subjs_master['MRN'])
list_subj_initial_CT = mrn_groups.agg(lambda x: x.loc[x.Datetime_Format.argmin()])
else:
list_subj_initial_CT = list_subjs_master
## change Acn to int for merge
list_subj_initial_CT['Acn'] = list_subj_initial_CT['Acn'].astype(int)
## merge the labels from NLP
data_from_text_DC_labeled = pd.read_excel(NLP_path+"/DC_Labeled/Rad_Labeled_Only.xlsx")
data_from_text_ML = pd.read_csv(NLP_path+"/Reports/Hemorrhage_Reports_Batch_1_Predictions.csv")
unique_rad_label, counts_rad_label = np.unique(data_from_text_DC_labeled['Label'], return_counts=True)
unique_ML_label, counts_ML_label = np.unique(data_from_text_ML['Label'], return_counts=True)
print("Radiologist labels: "+str(unique_rad_label)+" | counts of each label: "+str(counts_rad_label))
print("ML labels: "+str(unique_ML_label)+" | counts of each label: "+str(counts_ML_label))
data_labels_radiologist_and_ML = data_from_text_ML.append(pd.DataFrame(data = data_from_text_DC_labeled))
data_labels_radiologist_and_ML.to_csv(NLP_path+'/merged_ML_Rad_labels_check.csv')
merged_path_labels =
completion: pd.merge(list_subj_initial_CT, data_labels_radiologist_and_ML, on=['Acn'],how='inner')
api: pandas.merge
import logging
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from scipy.interpolate import interp2d
from workalendar.europe import Portugal, Greece
def rescale(arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
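# e.g. rescale(np.random.rand(80, 80), 8, 8) returns an (8, 8) array; this is how
# stack_2d's compress branch below shrinks each 2-D sample before stacking.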
def stack_2d(X, sample, compress):
if compress:
sample = rescale(sample, 8, 8)
if len(sample.shape) == 3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape) == 2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
def stack_3d(X, sample):
if X.shape[0] == 0:
X = sample
elif len(sample.shape) != len(X.shape):
X = np.vstack((X, sample[np.newaxis]))
else:
X = np.vstack((X, sample))
return X
class st_miguel(Portugal):
FIXED_HOLIDAYS = Portugal.FIXED_HOLIDAYS + (
(4, 11, "<NAME>"),
(7, 18, "Dia de Portugal"),
(6, 29, " <NAME>"),
)
def get_fixed_holidays(self, year):
days = super().get_fixed_holidays(year)
return days
def get_variable_days(self, year):
days = super().get_variable_days(year)
if year > 2015 or year < 2013:
days.append((self.get_easter_sunday(year) +
completion: pd.DateOffset(days=36)
api: pandas.DateOffset
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith("M8"):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns"])
def test_astype_to_timedelta_unit_ns(self, unit):
# preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
r" \[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
r" \[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
# issue #14878
df = DataFrame([1, 2, 3])
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
df.astype(np.int8, errors="ignore")
def test_arg_for_errors_in_astype_dictlist(self):
# GH-25905
df = pd.DataFrame(
[
{"a": "1", "b": "16.5%", "c": "test"},
{"a": "2.2", "b": "15.3", "c": "another_test"},
]
)
expected = pd.DataFrame(
[
{"a": 1.0, "b": "16.5%", "c": "test"},
{"a": 2.2, "b": "15.3", "c": "another_test"},
]
)
type_dict = {"a": "float64", "b": "float64", "c": "object"}
result = df.astype(dtype=type_dict, errors="ignore")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected",
[
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(
DataFrame(
{
"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object),
}
),
True,
),
# multi-extension
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["a", "b"])}
),
True,
),
# differ types
(DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
# differ sizes
(
DataFrame(
{
"A": np.array([1, 2], dtype=np.int32),
"B": np.array([1, 2], dtype=np.int64),
}
),
False,
),
# multi-extension differ
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["b", "c"])}
),
False,
),
],
)
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype="object")
tm.assert_numpy_array_equal(result, expected)
def test_str_to_small_float_conversion_type(self):
# GH 20388
np.random.seed(13)
col_data = [str(np.random.random() * 1e-12) for _ in range(5)]
result = pd.DataFrame(col_data, columns=["A"])
expected = pd.DataFrame(col_data, columns=["A"], dtype=object)
tm.assert_frame_equal(result, expected)
# change the dtype of the elements from object to float one by one
result.loc[result.index, "A"] = [float(x) for x in col_data]
expected = pd.DataFrame(col_data, columns=["A"], dtype=float)
tm.assert_frame_equal(result, expected)
class TestDataFrameDatetimeWithTZ:
def test_interleave(self, timezone_frame):
# interleave with object
result = timezone_frame.assign(D="foo").values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
["foo", "foo", "foo"],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = timezone_frame.values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
def test_astype(self, timezone_frame):
# astype
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
expected = DataFrame(
expected,
index=timezone_frame.index,
columns=timezone_frame.columns,
dtype=object,
)
result = timezone_frame.astype(object)
completion: tm.assert_frame_equal(result, expected)
api: pandas.util.testing.assert_frame_equal
import numpy as np
import pandas as pd
import unittest
from context import grama as gr
from context import data
X = gr.Intention()
##==============================================================================
## transform summary functions
##==============================================================================
class TestSummaryFcn(unittest.TestCase):
def test_mean(self):
df = data.df_diamonds >> gr.tf_select(X.cut, X.x) >> gr.tf_head(5)
# straight summarize
t = df >> gr.tf_summarize(m=gr.mean(X.x))
df_truth = pd.DataFrame({"m": [4.086]})
self.assertTrue(t.equals(df_truth))
# grouped summarize
t = df >> gr.tf_group_by(X.cut) >> gr.tf_summarize(m=gr.mean(X.x))
df_truth = pd.DataFrame(
{"cut": ["Good", "Ideal", "Premium"], "m": [4.195, 3.950, 4.045]}
)
self.assertTrue(t.equals(df_truth))
# straight mutate
t = df >> gr.tf_mutate(m=gr.mean(X.x))
df_truth = df.copy()
df_truth["m"] = df_truth.x.mean()
self.assertTrue(t.equals(df_truth))
# grouped mutate
t = df >> gr.tf_group_by(X.cut) >> gr.tf_mutate(m=gr.mean(X.x))
df_truth["m"] = pd.Series([3.950, 4.045, 4.195, 4.045, 4.195])
self.assertTrue(t.sort_index().equals(df_truth))
def test_skew(self):
df_truth = pd.DataFrame({"m": [0.09984760044443139]})
df_res = (
data.df_shewhart
>> gr.tf_summarize(m=gr.skew(X.tensile_strength))
)
self.assertTrue(df_truth.equals(df_res))
def test_kurt(self):
df_truth = pd.DataFrame({"m": [2.605643942300021]})
df_res = (
data.df_shewhart
>> gr.tf_summarize(m=gr.kurt(X.tensile_strength))
)
self.assertTrue(df_truth.equals(df_res))
def test_first(self):
df = data.df_diamonds >> gr.tf_select(X.cut, X.x) >> gr.tf_head(5)
# straight summarize
t = df >> gr.tf_summarize(f=gr.first(X.x))
df_truth = pd.DataFrame({"f": [3.95]})
self.assertTrue(t.equals(df_truth))
# grouped summarize
t = df >> gr.tf_group_by(X.cut) >> gr.tf_summarize(f=gr.first(X.x))
df_truth = pd.DataFrame(
{"cut": ["Good", "Ideal", "Premium"], "f": [4.05, 3.95, 3.89]}
)
self.assertTrue(t.equals(df_truth))
# summarize with order_by
t = df >> gr.tf_summarize(f=gr.first(X.x, order_by=gr.desc(X.cut)))
df_truth = pd.DataFrame({"f": [3.89]})
self.assertTrue(t.equals(df_truth))
# straight mutate
t = df >> gr.tf_mutate(f=gr.first(X.x))
df_truth = df.copy()
df_truth["f"] = df_truth.x.iloc[0]
self.assertTrue(t.equals(df_truth))
# grouped mutate
t = df >> gr.tf_group_by(X.cut) >> gr.tf_mutate(f=gr.first(X.x))
df_truth["f"] = pd.Series([3.95, 3.89, 4.05, 3.89, 4.05])
self.assertTrue(t.sort_index().equals(df_truth))
def test_last(self):
df = data.df_diamonds >> gr.tf_select(X.cut, X.x) >> gr.tf_head(5)
# straight summarize
t = df >> gr.tf_summarize(l=gr.last(X.x))
df_truth = pd.DataFrame({"l": [4.34]})
self.assertTrue(t.equals(df_truth))
# grouped summarize
t = df >> gr.tf_group_by(X.cut) >> gr.tf_summarize(l=gr.last(X.x))
df_truth = pd.DataFrame(
{"cut": ["Good", "Ideal", "Premium"], "l": [4.34, 3.95, 4.20]}
)
self.assertTrue(t.equals(df_truth))
# summarize with order_by
t = df >> gr.tf_summarize(
f=gr.last(X.x, order_by=[gr.desc(X.cut), gr.desc(X.x)])
)
df_truth = pd.DataFrame({"f": [4.05]})
assert df_truth.equals(t)
# straight mutate
t = df >> gr.tf_mutate(l=gr.last(X.x))
df_truth = df.copy()
df_truth["l"] = df_truth.x.iloc[4]
self.assertTrue(t.equals(df_truth))
# grouped mutate
t = df >> gr.tf_group_by(X.cut) >> gr.tf_mutate(l=gr.last(X.x))
df_truth["l"] = pd.Series([3.95, 4.20, 4.34, 4.20, 4.34])
self.assertTrue(t.sort_index().equals(df_truth))
def test_nth(self):
df = data.df_diamonds >> gr.tf_select(X.cut, X.x) >> gr.tf_head(10)
# straight summarize
t = df >> gr.tf_summarize(second=gr.nth(X.x, 1))
df_truth = pd.DataFrame({"second": [3.89]})
self.assertTrue(t.equals(df_truth))
# grouped summarize
t = df >> gr.tf_group_by(X.cut) >> gr.tf_summarize(first=gr.nth(X.x, 0))
df_truth = pd.DataFrame(
{
"cut": ["Fair", "Good", "Ideal", "Premium", "Very Good"],
"first": [3.87, 4.05, 3.95, 3.89, 3.94],
}
)
self.assertTrue(t.equals(df_truth))
# summarize with order_by
t = df >> gr.tf_summarize(
last=gr.nth(X.x, -1, order_by=[gr.desc(X.cut), gr.desc(X.x)])
)
df_truth = pd.DataFrame({"last": [3.87]})
self.assertTrue(df_truth.equals(t))
# straight mutate
t = df >> gr.tf_mutate(out_of_range=gr.nth(X.x, 500))
df_truth = df.copy()
df_truth["out_of_range"] = np.nan
self.assertTrue(t.equals(df_truth))
# grouped mutate
t = df >> gr.tf_group_by(X.cut) >> gr.tf_mutate(penultimate=gr.nth(X.x, -2))
df_truth = df.copy()
df_truth["penultimate"] = pd.Series(
[np.nan, 3.89, 4.05, 3.89, 4.05, 4.07, 4.07, 4.07, np.nan, 4.07]
)
self.assertTrue(t.sort_index().equals(df_truth))
def test_n(self):
df = data.df_diamonds >> gr.tf_select(X.cut, X.x) >> gr.tf_head(5)
# straight summarize
t = df >> gr.tf_summarize(n=gr.n(X.x))
df_truth =
completion: pd.DataFrame({"n": [5]})
api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
dtype = {
'b': CategoricalDtype([pd.Timestamp("2014")])
}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_timedelta(self):
dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
data = "b\n1H\n2H\n3H"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_unexpected_categories(self):
dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = pd.DataFrame({"b": Categorical(list('dacd'),
dtype=dtype['b'])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ['a', 'b', 'c']
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'],
categories=cats)}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'],
categories=cats)},
index=[2, 3])]
dtype = CategoricalDtype(cats)
actuals = self.read_csv(StringIO(data), dtype={'b': dtype},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
# see gh-9424
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one.1', dtype='f')], axis=1)
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
data = ''
result = self.read_csv(StringIO(data), names=['one', 'one'],
dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_raise_on_passed_int_dtype_with_nas(self):
# see gh-2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
sep=",", skipinitialspace=True,
dtype={'DOY': np.int64})
def test_dtype_with_converter(self):
data = """a,b
1.1,2.2
1.2,2.3"""
        # dtype spec is ignored if a converter is specified
with tm.assert_produces_warning(ParserWarning):
result = self.read_csv(StringIO(data), dtype={'a': 'i8'},
converters={'a': lambda x: str(x)})
expected = DataFrame({'a': ['1.1', '1.2'], 'b': [2.2, 2.3]})
tm.assert_frame_equal(result, expected)
def test_empty_dtype(self):
# see gh-14712
data = 'a,b'
expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'a': pd.Categorical([]),
'b': pd.Categorical([])},
index=[])
result = self.read_csv(StringIO(data), header=0,
dtype='category')
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), header=0,
dtype={'a': 'category', 'b': 'category'})
tm.assert_frame_equal(result, expected)
expected =
|
pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
|
pandas.DataFrame
|
""" Given (1) two files (one with training and one with test data; both with headers),
(2) a file with the features that should be used for learning, and
    (3) a compatible regression model, saved with pickle, to be used as a baseline,
this script explores different ways of combining classification results with other sources of info
in order to recommend useful datasets for augmentation.
"""
import sys
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils import shuffle
from sklearn.metrics import f1_score, classification_report
from scipy.stats import pearsonr, kendalltau
import numpy as np
import pickle
GLOBAL_COLUMN_NAME = 'class'
TARGET_COLUMN = 'gain_in_r2_score'
POSITIVE_CLASS = 'good_gain'
NEGATIVE_CLASS = 'loss'
KEY_SEPARATOR = '*'
ALPHA = 0
def downsample_data(dataset, class_column):
"""This function downsamples the number of instances of a class that is over-represented in the dataset.
It's important to keep the learning 'fair'
"""
negative = dataset.loc[dataset[class_column] == NEGATIVE_CLASS]
positive = dataset.loc[dataset[class_column] == POSITIVE_CLASS]
sample_size = min([negative.shape[0], positive.shape[0]])
negative = negative.sample(n=sample_size, random_state=42)
positive = positive.sample(n=sample_size, random_state=42)
frames = [negative, positive]
return shuffle(pd.concat(frames), random_state=0)
def determine_classes_based_on_gain_in_r2_score(dataset, downsample=False):
"""This function determines the class of each row in the dataset based on the value
of TARGET_COLUMN
"""
gains = dataset[TARGET_COLUMN]
dataset[GLOBAL_COLUMN_NAME] = [POSITIVE_CLASS if i > ALPHA else NEGATIVE_CLASS for i in gains]
if downsample:
return downsample_data(dataset, GLOBAL_COLUMN_NAME)
return dataset
def generate_predictions(training, test, features, class_column=None):
"""This function creates a random forest classifier and generates
predictions for the test data
"""
if not class_column:
class_column = GLOBAL_COLUMN_NAME
training = determine_classes_based_on_gain_in_r2_score(training)
test = determine_classes_based_on_gain_in_r2_score(test)
X_train = training[features]
X_test = test[features]
y_train = training[class_column]
y_test = test[class_column]
clf = RandomForestClassifier(random_state=42, n_estimators=100)
clf.fit(X_train, y_train)
test['pred_' + class_column] = clf.predict(X_test)
print(classification_report(y_test, test['pred_' + class_column]))
test['prob_positive_' + class_column] = [i[0] for i in clf.predict_proba(X_test)]
model_filename = 'classifier-training-dataset-50-percent-class-column-' + class_column + '.sav'
pickle.dump(clf, open(model_filename, 'wb'))
return test
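# Minimal usage sketch for generate_predictions (the feature names below are only an
# illustration and must be columns of both CSVs): the returned test frame gains
# 'pred_<class_column>' and 'prob_positive_<class_column>' columns.
#   feats = ['containment_fraction', 'max_pearson_difference']
#   test_with_preds = generate_predictions(training_data, test_data, feats)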
def parse_rows(dataset_with_predictions, class_column=GLOBAL_COLUMN_NAME):
"""This function extracts different features for combinations of
query, target, and candidate
"""
candidates_per_query_target = {str(row['query']) + KEY_SEPARATOR + str(row['target']): {} for index, row in dataset_with_predictions.iterrows()}
for index, row in dataset_with_predictions.iterrows():
key = str(row['query']) + KEY_SEPARATOR + str(row['target'])
candidates_per_query_target[key][row['candidate']] = {TARGET_COLUMN: row[TARGET_COLUMN], class_column: row[class_column], 'pred': row['pred'], 'pred_prob': row['prob_positive_'+ class_column]}
return candidates_per_query_target
def compute_correlation_prob_class_target(candidates_per_query_target):
"""This function computes the overall correlation between the probability of being in
the positive class and the value of the target column
"""
probs_per_query_target = []
gains_per_query_target = []
for key in candidates_per_query_target.keys():
candidates = candidates_per_query_target[key].keys()
tmp_probs = [candidates_per_query_target[key][candidate]['pred_prob'] for candidate in candidates]
tmp_gains = [candidates_per_query_target[key][candidate][TARGET_COLUMN] for candidate in candidates]
probs_per_query_target += tmp_probs
gains_per_query_target += tmp_gains
return pearsonr(probs_per_query_target, gains_per_query_target)
def compute_precision_per_query_target(candidates_per_query_target, class_column=GLOBAL_COLUMN_NAME):
"""This function computes the precision for the positive class for each query-target
"""
precs = []
for key in candidates_per_query_target.keys():
candidates = candidates_per_query_target[key].keys()
predicted_positive = 0
real_positive = 0
for candidate in candidates:
if candidates_per_query_target[key][candidate][class_column] == POSITIVE_CLASS:
real_positive += 1
if candidates_per_query_target[key][candidate]['pred'] == POSITIVE_CLASS:
predicted_positive += 1
if real_positive:
precs.append(predicted_positive/real_positive)
return precs
def compute_recall_for_top_k_candidates(candidates_per_query_target, k):
"""This function computes how many of the top k candidates we efficiently retrieve
"""
top_recall = []
num_cands = []
keys_with_at_least_k_relevant_gains = 0
for key in candidates_per_query_target.keys():
candidates = candidates_per_query_target[key].keys()
num_cands.append(len(candidates))
gains = []
for candidate in candidates:
gains.append((candidates_per_query_target[key][candidate][TARGET_COLUMN], candidates_per_query_target[key][candidate]['pred']))
relevant_gains = [i for i in sorted(gains)[-k:] if i[0] > ALPHA]
positive_right = 0
for (gain, class_) in relevant_gains:
if class_ == POSITIVE_CLASS:
positive_right += 1
if len(relevant_gains) >= k:
top_recall.append(positive_right/k)
keys_with_at_least_k_relevant_gains += 1
print('this recall was computed taking', keys_with_at_least_k_relevant_gains, 'keys out of', len(candidates_per_query_target.keys()), 'into account')
print('avg and median num of candidates per query-target pair', np.mean(num_cands), np.median(num_cands))
return top_recall
def analyze_predictions(test_with_preds):
"""This function separates all candidates for each
query-target pair and then analyzes how well the classification worked in
each case
"""
candidates_per_query_target = parse_rows(test_with_preds)
print('correlation between the probability of being in the positive class and the actual gains', compute_correlation_prob_class_target(candidates_per_query_target))
print('What is the average precision for positive class per query-target?', np.mean(compute_precision_per_query_target(candidates_per_query_target)))
print('What is the average recall for the top-5 candidates?', np.mean(compute_recall_for_top_k_candidates(candidates_per_query_target, 5)))
print('What is the average recall for the top-1 candidates?', np.mean(compute_recall_for_top_k_candidates(candidates_per_query_target, 1)))
print('What is the average recall for the top-3 candidates?', np.mean(compute_recall_for_top_k_candidates(candidates_per_query_target, 3)))
def build_regressor_for_ranking_positive_class(dataset, features, regression_target=TARGET_COLUMN):
"""This function builds a regressor based exclusively on positive class'
examples present in the dataset
"""
if regression_target in features:
print('The target for the regression task cannot be one of the features')
return
positive_examples = dataset.loc[dataset[TARGET_COLUMN] > ALPHA]
X = positive_examples[features]
y = positive_examples[regression_target]
regressor = RandomForestRegressor(random_state=20)
regressor.fit(X, y)
return regressor
def compute_modified_r_precision(real_gains, predicted_gains, k=5):
"""This function computes R-precision, which is the ratio between all the relevant documents
retrieved until the rank that equals the number of relevant documents you have in your collection in total (r),
to the total number of relevant documents in your collection R.
"""
relevant_documents = [elem[0] for elem in sorted(real_gains, key = lambda x:x[1], reverse=True) if elem[1] > 1][:k]
#print('real gains in compute_r_precision', real_gains)
#print('relevant documents (positive only)', relevant_documents)
predicted_ranking = [elem[0] for elem in sorted(predicted_gains, key = lambda x:x[1], reverse=True)][:len(relevant_documents)]
#print('predicted gains in compute_r_precision', predicted_gains)
#print('relevant documents (positive only)', predicted_ranking)
if relevant_documents and predicted_ranking:
return len(set(relevant_documents) & set(predicted_ranking))/len(predicted_ranking)
return float('nan')
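# Worked example for compute_modified_r_precision (hypothetical candidates and gains):
#   real_gains      = [('c1', 2.0), ('c2', 1.5), ('c3', 0.2)]
#   predicted_gains = [('c1', 0.9), ('c3', 0.8), ('c2', 0.1)]
# Relevant documents (real gain > 1) are ['c1', 'c2'], so the predicted ranking is cut
# at length 2, giving ['c1', 'c3']; the overlap is {'c1'} and the score is 1/2 = 0.5.
#   compute_modified_r_precision(real_gains, predicted_gains)  # -> 0.5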
def compute_r_recall(real_gains, predicted_gains, k=5, positive_only=True):
"""This function computes 'R-recall' (does it exist officially?), defined as the ratio between all the top-k relevant documents
retrieved until the rank k and the total number of retrieved documents (should be k).
In this setting, if positive_only == True, relevant documents correspond exclusively to candidates associated to real positive
gains. If there are no relevant documents in this case, this function returns 'nan'. Alternatively, if positive_only == False
we consider that the relevant documents are the k highest ranked candidates in real_gains.
"""
ranking = [elem[0] for elem in sorted(predicted_gains, key = lambda x:x[1], reverse=True)][:k]
if positive_only:
top_k_relevant_documents = [elem[0] for elem in sorted(real_gains, key = lambda x:x[1], reverse=True) if elem[1] > 0][:k]
else:
top_k_relevant_documents = [elem[0] for elem in sorted(real_gains, key = lambda x:x[1], reverse=True)][:k]
if top_k_relevant_documents and ranking:
return len(set(top_k_relevant_documents) & set(ranking))/len(ranking)
return float('nan')
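# Worked example for compute_r_recall with k=2 and positive_only=True (hypothetical values):
#   real_gains      = [('c1', 0.5), ('c2', 0.3), ('c3', -0.1)]
#   predicted_gains = [('c2', 0.9), ('c3', 0.8), ('c1', 0.1)]
# Top-2 relevant documents (real gain > 0): ['c1', 'c2']; top-2 of the predicted ranking:
# ['c2', 'c3']; the overlap is {'c2'}, so the score is 1/2 = 0.5.
#   compute_r_recall(real_gains, predicted_gains, k=2)  # -> 0.5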
def compute_average_precision(real_gains, predicted_gains):
"""This function computes average precision, which is the average of precision at k values for k=1..len(real_gains).
Average precision values can later be used for the computation of MAP (Mean Average Precision)
"""
    precs = [compute_modified_r_precision(real_gains, predicted_gains, k=x+1) for x in range(len(real_gains))]
if not precs:
return 0.
return np.mean(precs)
def compute_precision_at_k(real_gains, predicted_gains, k=5):
"""This function computes precision-at-k, i.e. the proportion of recommended items in the top-k set
that are relevant (belong to real_gains).
    NOTE THAT IT IS NOT AS STRICT AS compute_modified_r_precision!
"""
relevant_documents = [elem[0] for elem in real_gains]
retrieved_documents = [elem[0] for elem in sorted(predicted_gains, key = lambda x:x[1], reverse=True)][:k]
if relevant_documents and retrieved_documents:
return len(set(relevant_documents) & set(retrieved_documents))/len(retrieved_documents)
return float('nan')
def compute_recall_at_k(real_gains, predicted_gains, k=5):
"""This function computes recall-at-k, i.e. the proportion of relevant items found in the top-k
recommendations.
"""
relevant_documents = [elem[0] for elem in real_gains]
retrieved_documents = [elem[0] for elem in sorted(predicted_gains, key = lambda x:x[1], reverse=True)][:k]
if relevant_documents and retrieved_documents:
return len(set(relevant_documents) & set(retrieved_documents))/len(relevant_documents)
return float('nan')
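# Worked example contrasting the two metrics at k=2 (hypothetical values):
#   real_gains      = [('c1', 0.4), ('c2', 0.1), ('c4', 0.3)]   # 3 relevant candidates
#   predicted_gains = [('c1', 0.9), ('c3', 0.7), ('c2', 0.2)]
# Top-2 retrieved: ['c1', 'c3'], of which only 'c1' is relevant, so
#   compute_precision_at_k(real_gains, predicted_gains, k=2)  # -> 1/2 = 0.5
#   compute_recall_at_k(real_gains, predicted_gains, k=2)     # -> 1/3 ≈ 0.33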
def rank_candidates_classified_as_positive(test_with_preds, regressor, features, baseline_regressor, class_column=GLOBAL_COLUMN_NAME):
"""This function gets all candidates for each (query, target) tuple, selects those that were classified as positive, and
uses a regressor to rank them.
"""
candidates_per_query_target = parse_rows(test_with_preds)
avg_precs = [[] for i in range(51)]
avg_precs_certain = [[] for i in range(51)]
avg_precs_containment = [[] for i in range(51)]
avg_precs_baseline_regressor = [[] for i in range(51)]
avg_precs_classif_containment = [[] for i in range(51)]
avg_precs_classif_certain_containment = [[] for i in range(51)]
avg_precs_classif_pearson = [[] for i in range(51)]
avg_precs_classif_certain_pearson = [[] for i in range(51)]
avg_recs = [[] for i in range(51)]
avg_recs_certain = [[] for i in range(51)]
avg_recs_containment = [[] for i in range(51)]
avg_recs_baseline_regressor = [[] for i in range(51)]
avg_recs_classif_containment = [[] for i in range(51)]
avg_recs_classif_certain_containment = [[] for i in range(51)]
avg_recs_classif_pearson = [[] for i in range(51)]
avg_recs_classif_certain_pearson = [[] for i in range(51)]
avg_strict_r_precisions = [[] for i in range(51)]
avg_strict_r_precisions_certain = [[] for i in range(51)]
avg_strict_r_precisions_containment = [[] for i in range(51)]
avg_strict_r_precisions_baseline_regressor = [[] for i in range(51)]
avg_strict_r_precisions_classif_containment = [[] for i in range(51)]
avg_strict_r_precisions_classif_certain_containment = [[] for i in range(51)]
avg_strict_r_precisions_classif_pearson = [[] for i in range(51)]
avg_strict_r_precisions_classif_certain_pearson = [[] for i in range(51)]
avg_strict_r_recalls = [[] for i in range(51)]
avg_strict_r_recalls_certain = [[] for i in range(51)]
avg_strict_r_recalls_containment = [[] for i in range(51)]
avg_strict_r_recalls_baseline_regressor = [[] for i in range(51)]
avg_strict_r_recalls_classif_containment = [[] for i in range(51)]
avg_strict_r_recalls_classif_certain_containment = [[] for i in range(51)]
avg_strict_r_recalls_classif_pearson = [[] for i in range(51)]
avg_strict_r_recalls_classif_certain_pearson = [[] for i in range(51)]
numbers_of_retrieved_candidates = []
for key in candidates_per_query_target.keys():
query, target = key.split(KEY_SEPARATOR)
instances = test_with_preds.loc[(test_with_preds['query'] == query) & (test_with_preds['target'] == target)]
truly_positive = instances.loc[test_with_preds[class_column] == POSITIVE_CLASS]
predicted_positive_certain = instances.loc[(test_with_preds['pred'] == POSITIVE_CLASS) & (test_with_preds['prob_positive_class'] > 0.6)]
predicted_positive = instances.loc[test_with_preds['pred'] == POSITIVE_CLASS]
if truly_positive.shape[0] and predicted_positive.shape[0] and predicted_positive_certain.shape[0]:
real_gains = truly_positive[['candidate', TARGET_COLUMN]].values.tolist()
classifier_and_regressor_gains = [[candidate, estimated_gain] for candidate, estimated_gain in zip(predicted_positive['candidate'], regressor.predict(predicted_positive[features]))]
classifier_certain_and_regressor_gains = [[candidate, estimated_gain] for candidate, estimated_gain in zip(predicted_positive_certain['candidate'], regressor.predict(predicted_positive_certain[features]))]
classifier_and_containment_gains = predicted_positive[['candidate', 'containment_fraction']].values.tolist()
classifier_certain_and_containment_gains = predicted_positive_certain[['candidate', 'containment_fraction']].values.tolist()
classifier_and_max_pearson_diff_gains = predicted_positive[['candidate', 'max_pearson_difference']].values.tolist()
classifier_certain_and_max_pearson_diff_gains = predicted_positive_certain[['candidate', 'max_pearson_difference']].values.tolist()
containment_baseline_gains = instances[['candidate', 'containment_fraction']].values.tolist()
max_pearson_diff_baseline_gains = instances[['candidate', 'max_pearson_difference']].values.tolist()
baseline_regressor_gains = [[candidate, baseline_gain] for candidate, baseline_gain in zip(instances['candidate'], regressor.predict(instances[features]))]
for index in range(51):
avg_precs[index].append(compute_precision_at_k(real_gains, classifier_and_regressor_gains, k=index))
avg_precs_certain[index].append(compute_precision_at_k(real_gains, classifier_certain_and_regressor_gains, k=index))
avg_precs_classif_containment[index].append(compute_precision_at_k(real_gains, classifier_and_containment_gains, k=index))
avg_precs_classif_certain_containment[index].append(compute_precision_at_k(real_gains, classifier_certain_and_containment_gains, k=index))
avg_precs_classif_pearson[index].append(compute_precision_at_k(real_gains, classifier_and_max_pearson_diff_gains, k=index))
avg_precs_classif_certain_pearson[index].append(compute_precision_at_k(real_gains, classifier_certain_and_max_pearson_diff_gains, k=index))
avg_precs_containment[index].append(compute_precision_at_k(real_gains, containment_baseline_gains, k=index))
avg_precs_baseline_regressor[index].append(compute_precision_at_k(real_gains, baseline_regressor_gains, k=index))
avg_recs[index].append(compute_recall_at_k(real_gains, classifier_and_regressor_gains, k=index))
avg_recs_certain[index].append(compute_recall_at_k(real_gains, classifier_certain_and_regressor_gains, k=index))
avg_recs_classif_containment[index].append(compute_recall_at_k(real_gains, classifier_and_containment_gains, k=index))
avg_recs_classif_certain_containment[index].append(compute_recall_at_k(real_gains, classifier_certain_and_containment_gains, k=index))
avg_recs_classif_pearson[index].append(compute_recall_at_k(real_gains, classifier_and_max_pearson_diff_gains, k=index))
avg_recs_classif_certain_pearson[index].append(compute_recall_at_k(real_gains, classifier_certain_and_max_pearson_diff_gains, k=index))
avg_recs_containment[index].append(compute_recall_at_k(real_gains, containment_baseline_gains, k=index))
avg_recs_baseline_regressor[index].append(compute_recall_at_k(real_gains, baseline_regressor_gains, k=index))
avg_strict_r_precisions[index].append(compute_modified_r_precision(real_gains, classifier_and_regressor_gains, k=index))
avg_strict_r_precisions_certain[index].append(compute_modified_r_precision(real_gains, classifier_certain_and_regressor_gains, k=index))
avg_strict_r_precisions_classif_containment[index].append(compute_modified_r_precision(real_gains, classifier_and_containment_gains, k=index))
avg_strict_r_precisions_classif_certain_containment[index].append(compute_modified_r_precision(real_gains, classifier_certain_and_containment_gains, k=index))
avg_strict_r_precisions_classif_pearson[index].append(compute_modified_r_precision(real_gains, classifier_and_max_pearson_diff_gains, k=index))
avg_strict_r_precisions_classif_certain_pearson[index].append(compute_modified_r_precision(real_gains, classifier_certain_and_max_pearson_diff_gains, k=index))
avg_strict_r_precisions_containment[index].append(compute_modified_r_precision(real_gains, containment_baseline_gains, k=index))
avg_strict_r_precisions_baseline_regressor[index].append(compute_modified_r_precision(real_gains, baseline_regressor_gains, k=index))
avg_strict_r_recalls[index].append(compute_r_recall(real_gains, classifier_and_regressor_gains, k=index))
avg_strict_r_recalls_certain[index].append(compute_r_recall(real_gains, classifier_certain_and_regressor_gains, k=index))
avg_strict_r_recalls_classif_containment[index].append(compute_r_recall(real_gains, classifier_and_containment_gains, k=index))
avg_strict_r_recalls_classif_certain_containment[index].append(compute_r_recall(real_gains, classifier_certain_and_containment_gains, k=index))
avg_strict_r_recalls_classif_pearson[index].append(compute_r_recall(real_gains, classifier_and_max_pearson_diff_gains, k=index))
avg_strict_r_recalls_classif_certain_pearson[index].append(compute_r_recall(real_gains, classifier_certain_and_max_pearson_diff_gains, k=index))
avg_strict_r_recalls_containment[index].append(compute_r_recall(real_gains, containment_baseline_gains, k=index))
avg_strict_r_recalls_baseline_regressor[index].append(compute_r_recall(real_gains, baseline_regressor_gains, k=index))
#break
numbers_of_retrieved_candidates.append(predicted_positive.shape[0])
print('average number of candidates per query-target (predicted as positive)', np.mean(numbers_of_retrieved_candidates))
for index in range(51):
# print('Prec@' + str(index) + ' - Classifier + Regressor:', np.mean(avg_precs[index]))
# print('Prec@' + str(index) + ' - Classifier (certain) + Regressor:', np.mean(avg_precs[index]))
# print('Prec@' + str(index) + ' - Classifier + Containment:', np.mean(avg_precs_classif_containment[index]))
# print('Prec@' + str(index) + ' - Classifier (certain) + Containment:', np.mean(avg_precs_classif_certain_containment[index]))
# print('Prec@' + str(index) + ' - Classifier + Max-Pearson-Diff:', np.mean(avg_precs_classif_pearson[index]))
# print('Prec@' + str(index) + ' - Classifier (certain) + Max-Pearson-Diff:', np.mean(avg_precs_classif_certain_pearson[index]))
# print('Prec@' + str(index) + ' - Containment baseline:', np.mean(avg_precs_containment[index]))
# print('Prec@' + str(index) + ' - Regression baseline:', np.mean(avg_precs_baseline_regressor[index]))
# print('Rec@' + str(index) + ' - Classifier + Regressor:', np.mean(avg_recs[index]))
# print('Rec@' + str(index) + ' - Classifier (certain) + Regressor:', np.mean(avg_recs[index]))
# print('Rec@' + str(index) + ' - Classifier + Containment:', np.mean(avg_recs_classif_containment[index]))
# print('Rec@' + str(index) + ' - Classifier (certain) + Containment:', np.mean(avg_recs_classif_certain_containment[index]))
# print('Rec@' + str(index) + ' - Classifier + Max-Pearson-Diff:', np.mean(avg_recs_classif_pearson[index]))
# print('Rec@' + str(index) + ' - Classifier (certain) + Max-Pearson-Diff:', np.mean(avg_recs_classif_certain_pearson[index]))
# print('Rec@' + str(index) + ' - Containment baseline:', np.mean(avg_recs_containment[index]))
# print('Rec@' + str(index) + ' - Regression baseline:', np.mean(avg_recs_baseline_regressor[index]))
print('Strict R-Precision (k=' + str(index) + ') - Classifier + Regressor:', np.mean([elem for elem in avg_strict_r_precisions[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Classifier (certain) + Regressor:', np.mean([elem for elem in avg_strict_r_precisions_certain[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Classifier + Containment:', np.mean([elem for elem in avg_strict_r_precisions_classif_containment[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Classifier (certain) + Containment:', np.mean([elem for elem in avg_strict_r_precisions_classif_certain_containment[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Classifier + Max-Pearson-Diff:', np.mean([elem for elem in avg_strict_r_precisions_classif_pearson[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Classifier (certain) + Max-Pearson-Diff:', np.mean([elem for elem in avg_strict_r_precisions_classif_certain_pearson[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Containment baseline:', np.mean([elem for elem in avg_strict_r_precisions_containment[index] if not np.isnan(elem)]))
print('Strict R-Precision (k=' + str(index) + ') - Regression baseline:', np.mean([elem for elem in avg_strict_r_precisions_baseline_regressor[index] if not np.isnan(elem)]))
# print('Strict R-Recall (k=' + str(index) + ') - Classifier + Regressor:', np.mean(avg_strict_r_recalls[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Classifier (certain) + Regressor:', np.mean(avg_strict_r_recalls[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Classifier + Containment:', np.mean(avg_strict_r_recalls_classif_containment[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Classifier (certain) + Containment:', np.mean(avg_strict_r_recalls_classif_certain_containment[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Classifier + Max-Pearson-Diff:', np.mean(avg_strict_r_recalls_classif_pearson[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Classifier (certain) + Max-Pearson-Diff:', np.mean(avg_strict_r_recalls_classif_certain_pearson[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Containment baseline:', np.mean(avg_strict_r_recalls_containment[index]))
# print('Strict R-Recall (k=' + str(index) + ') - Regression baseline:', np.mean(avg_strict_r_recalls_baseline_regressor[index]))
if __name__ == '__main__':
training_filename = sys.argv[1]
test_filename = sys.argv[2]
features = eval(open(sys.argv[3]).readline())
#baseline_regressor = pickle.load(open(sys.argv[4], 'rb'))
training_data = pd.read_csv(training_filename)
test_data =
|
pd.read_csv(test_filename)
|
pandas.read_csv
|
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
# run a simulation on the CMES cal, and schedule a function
# using the NYSE cal
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
# each minute should be a nyse session open
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
# each minute should be a minute before a nyse session close
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
expected_functions = [pre, handle_data, f, g, post] * 60030
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('minute'),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE =
|
pd.Timestamp('2006-01-05', tz='UTC')
|
pandas.Timestamp
|